From c3ce478d4af030fb3d468c5092759c5925382396 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 8 Jul 2025 08:29:46 +0000 Subject: [PATCH 01/27] attr: parse `rustc_scalable_vector(N)` Extend parsing of `ReprOptions` with `rustc_scalable_vector(N)` which optionally accepts a single literal integral value - the base multiple of lanes that are in a scalable vector. Can only be applied to structs. Co-authored-by: Jamie Cunliffe --- compiler/rustc_abi/src/lib.rs | 22 + .../src/attributes/rustc_internal.rs | 23 ++ compiler/rustc_attr_parsing/src/context.rs | 3 +- compiler/rustc_feature/src/builtin_attrs.rs | 4 + .../rustc_hir/src/attrs/data_structures.rs | 8 + .../rustc_hir/src/attrs/encode_cross_crate.rs | 1 + compiler/rustc_middle/src/ty/mod.rs | 30 +- compiler/rustc_middle/src/ty/sty.rs | 8 + compiler/rustc_passes/messages.ftl | 4 + compiler/rustc_passes/src/check_attr.rs | 10 + compiler/rustc_passes/src/errors.rs | 9 + compiler/rustc_span/src/symbol.rs | 1 + tests/ui/scalable-vectors/invalid.rs | 158 ++++++++ tests/ui/scalable-vectors/invalid.stderr | 375 ++++++++++++++++++ tests/ui/stats/input-stats.stderr | 4 +- tests/ui/thir-print/thir-tree-match.stdout | 8 +- 16 files changed, 656 insertions(+), 12 deletions(-) create mode 100644 tests/ui/scalable-vectors/invalid.rs create mode 100644 tests/ui/scalable-vectors/invalid.stderr diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 14e256b8045df..b7262fd8a01f3 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -93,9 +93,11 @@ bitflags! { // Other flags can still inhibit reordering and thus randomization. // The seed stored in `ReprOptions.field_shuffle_seed`. const RANDOMIZE_LAYOUT = 1 << 4; + const IS_SCALABLE = 1 << 5; // Any of these flags being set prevent field reordering optimisation. const FIELD_ORDER_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits() + | ReprFlags::IS_SCALABLE.bits() | ReprFlags::IS_LINEAR.bits(); const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits(); } @@ -132,6 +134,19 @@ impl IntegerType { } } +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[cfg_attr( + feature = "nightly", + derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic) +)] +pub enum ScalableElt { + /// `N` in `rustc_scalable_vector(N)` - the element count of the scalable vector + ElementCount(u128), + /// `rustc_scalable_vector` w/out `N`, used for tuple types of scalable vectors that only + /// contain other scalable vectors + Container, +} + /// Represents the repr options provided by the user. 
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)] #[cfg_attr( @@ -143,6 +158,8 @@ pub struct ReprOptions { pub align: Option, pub pack: Option, pub flags: ReprFlags, + /// `#[rustc_scalable_vector]` + pub scalable: Option, /// The seed to be used for randomizing a type's layout /// /// Note: This could technically be a `u128` which would @@ -159,6 +176,11 @@ impl ReprOptions { self.flags.contains(ReprFlags::IS_SIMD) } + #[inline] + pub fn scalable(&self) -> bool { + self.flags.contains(ReprFlags::IS_SCALABLE) + } + #[inline] pub fn c(&self) -> bool { self.flags.contains(ReprFlags::IS_C) diff --git a/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs b/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs index b465d2e62ff25..324ad41cd8c91 100644 --- a/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs +++ b/compiler/rustc_attr_parsing/src/attributes/rustc_internal.rs @@ -51,3 +51,26 @@ impl SingleAttributeParser for RustcObjectLifetimeDefaultParser { Some(AttributeKind::RustcObjectLifetimeDefault) } } + +pub(crate) struct RustcScalableVectorParser; + +impl SingleAttributeParser for RustcScalableVectorParser { + const PATH: &[rustc_span::Symbol] = &[sym::rustc_scalable_vector]; + const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost; + const ON_DUPLICATE: OnDuplicate = OnDuplicate::Error; + const TEMPLATE: AttributeTemplate = template!(Word, List: "count"); + + fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option { + if args.no_args().is_ok() { + return Some(AttributeKind::RustcScalableVector { + element_count: None, + span: cx.attr_span, + }); + } + + parse_single_integer(cx, args).map(|n| AttributeKind::RustcScalableVector { + element_count: Some(n), + span: cx.attr_span, + }) + } +} diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index 80dfdffdb5548..9eeedb2ded5d7 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -44,7 +44,7 @@ use crate::attributes::proc_macro_attrs::{ use crate::attributes::repr::{AlignParser, ReprParser}; use crate::attributes::rustc_internal::{ RustcLayoutScalarValidRangeEnd, RustcLayoutScalarValidRangeStart, - RustcObjectLifetimeDefaultParser, + RustcObjectLifetimeDefaultParser, RustcScalableVectorParser, }; use crate::attributes::semantics::MayDangleParser; use crate::attributes::stability::{ @@ -174,6 +174,7 @@ attribute_parsers!( Single, Single, Single, + Single, Single, Single, Single, diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 5c63d4808db2f..eb7f91bda9b49 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -1127,6 +1127,10 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ rustc_force_inline, Normal, template!(Word, NameValueStr: "reason"), WarnFollowing, EncodeCrossCrate::Yes, "`#[rustc_force_inline]` forces a free function to be inlined" ), + rustc_attr!( + rustc_scalable_vector, Normal, template!(List: "count"), WarnFollowing, EncodeCrossCrate::Yes, + "`#[rustc_scalable_vector]` defines a scalable vector type" + ), // ========================================================================== // Internal attributes, Testing: diff --git a/compiler/rustc_hir/src/attrs/data_structures.rs b/compiler/rustc_hir/src/attrs/data_structures.rs index 5f4193154674a..4634fc91e5eb9 100644 --- a/compiler/rustc_hir/src/attrs/data_structures.rs +++ 
b/compiler/rustc_hir/src/attrs/data_structures.rs @@ -436,6 +436,14 @@ pub enum AttributeKind { /// Represents `#[rustc_object_lifetime_default]`. RustcObjectLifetimeDefault, + /// Represents `#[rustc_scalable_vector(N)]` + RustcScalableVector { + /// The base multiple of lanes that are in a scalable vector, if provided. `element_count` + /// is not provided for representing tuple types. + element_count: Option, + span: Span, + }, + /// Represents `#[should_panic]` ShouldPanic { reason: Option, span: Span }, diff --git a/compiler/rustc_hir/src/attrs/encode_cross_crate.rs b/compiler/rustc_hir/src/attrs/encode_cross_crate.rs index e3a7f0b97a8f0..f3e3316632ad3 100644 --- a/compiler/rustc_hir/src/attrs/encode_cross_crate.rs +++ b/compiler/rustc_hir/src/attrs/encode_cross_crate.rs @@ -70,6 +70,7 @@ impl AttributeKind { RustcLayoutScalarValidRangeEnd(..) => Yes, RustcLayoutScalarValidRangeStart(..) => Yes, RustcObjectLifetimeDefault => No, + RustcScalableVector { .. } => Yes, ShouldPanic { .. } => No, SkipDuringMethodDispatch { .. } => No, SpecializationTrait(..) => No, diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 73e1661106eaf..5a4604f6556aa 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -24,7 +24,9 @@ pub use assoc::*; pub use generic_args::{GenericArgKind, TermKind, *}; pub use generics::*; pub use intrinsic::IntrinsicDef; -use rustc_abi::{Align, FieldIdx, Integer, IntegerType, ReprFlags, ReprOptions, VariantIdx}; +use rustc_abi::{ + Align, FieldIdx, Integer, IntegerType, ReprFlags, ReprOptions, ScalableElt, VariantIdx, +}; use rustc_ast::node_id::NodeMap; pub use rustc_ast_ir::{Movability, Mutability, try_visit}; use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet}; @@ -1469,9 +1471,20 @@ impl<'tcx> TyCtxt<'tcx> { field_shuffle_seed ^= user_seed; } - if let Some(reprs) = - find_attr!(self.get_all_attrs(did), AttributeKind::Repr { reprs, .. } => reprs) - { + let attrs = self.get_all_attrs(did); + let elt = find_attr!( + attrs, + AttributeKind::RustcScalableVector { element_count, .. } => element_count + ) + .map(|elt| match elt { + Some(n) => ScalableElt::ElementCount(*n), + None => ScalableElt::Container, + }); + if elt.is_some() { + flags.insert(ReprFlags::IS_SCALABLE); + } + + if let Some(reprs) = find_attr!(attrs, AttributeKind::Repr { reprs, .. } => reprs) { for (r, _) in reprs { flags.insert(match *r { attr::ReprRust => ReprFlags::empty(), @@ -1530,7 +1543,14 @@ impl<'tcx> TyCtxt<'tcx> { flags.insert(ReprFlags::IS_LINEAR); } - ReprOptions { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed } + ReprOptions { + int: size, + align: max_align, + pack: min_pack, + flags, + field_shuffle_seed, + scalable: elt, + } } /// Look up the name of a definition across crates. This does not look at HIR. 
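As a rough illustration of how the two accepted attribute forms end up in `ReprOptions::scalable` (the struct names below are placeholders; both forms require `#![feature(rustc_attrs)]` on a compiler with this series applied, and mirror the `wellformed.rs` test added later in the series):

    #![feature(rustc_attrs)]

    // With an element count: `scalable` becomes `Some(ScalableElt::ElementCount(4))`
    // and `ReprFlags::IS_SCALABLE` is set on the struct's repr.
    #[rustc_scalable_vector(4)]
    struct ScalableI32(i32);

    // Without a count: `scalable` becomes `Some(ScalableElt::Container)`, the form
    // used for tuple structs whose fields are themselves scalable vectors.
    #[rustc_scalable_vector]
    struct ScalablePair(ScalableI32, ScalableI32);
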
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 72474a6056696..d46e0b60155ab 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -1196,6 +1196,14 @@ impl<'tcx> Ty<'tcx> { } } + #[inline] + pub fn is_scalable_vector(self) -> bool { + match self.kind() { + Adt(def, _) => def.repr().scalable(), + _ => false, + } + } + pub fn sequence_element_type(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> { match self.kind() { Array(ty, _) | Slice(ty) => *ty, diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl index 6a28fe2617edf..d7699881e1518 100644 --- a/compiler/rustc_passes/messages.ftl +++ b/compiler/rustc_passes/messages.ftl @@ -631,6 +631,10 @@ passes_rustc_layout_scalar_valid_range_not_struct = attribute should be applied to a struct .label = not a struct +passes_rustc_scalable_vector = + attribute should be applied to a struct + .label = not a struct + passes_rustc_legacy_const_generics_index = #[rustc_legacy_const_generics] must have one index for each generic parameter .label = generic parameters diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 10c532b436aa1..3051349b0afa3 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -271,6 +271,9 @@ impl<'tcx> CheckAttrVisitor<'tcx> { AttributeKind::RustcLayoutScalarValidRangeStart(_num, attr_span) | AttributeKind::RustcLayoutScalarValidRangeEnd(_num, attr_span), ) => self.check_rustc_layout_scalar_valid_range(*attr_span, span, target), + Attribute::Parsed(AttributeKind::RustcScalableVector { + span: attr_span, .. + }) => self.check_rustc_scalable_vector(*attr_span, span, target), Attribute::Parsed(AttributeKind::ExportStable) => { // handled in `check_export` } @@ -1776,6 +1779,13 @@ impl<'tcx> CheckAttrVisitor<'tcx> { } } + fn check_rustc_scalable_vector(&self, attr_span: Span, span: Span, target: Target) { + if target != Target::Struct { + self.dcx().emit_err(errors::RustcScalableVector { attr_span, span }); + return; + } + } + /// Checks if `#[rustc_legacy_const_generics]` is applied to a function and has a valid argument. fn check_rustc_legacy_const_generics( &self, diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs index c6ab6b0d60179..432ec4725056c 100644 --- a/compiler/rustc_passes/src/errors.rs +++ b/compiler/rustc_passes/src/errors.rs @@ -535,6 +535,15 @@ pub(crate) struct RustcLayoutScalarValidRangeNotStruct { pub span: Span, } +#[derive(Diagnostic)] +#[diag(passes_rustc_scalable_vector)] +pub(crate) struct RustcScalableVector { + #[primary_span] + pub attr_span: Span, + #[label] + pub span: Span, +} + #[derive(Diagnostic)] #[diag(passes_rustc_legacy_const_generics_only)] pub(crate) struct RustcLegacyConstGenericsOnly { diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index acbed7a9eed81..dd8a9e6b0678d 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -1902,6 +1902,7 @@ symbols! 
{ rustc_reallocator, rustc_regions, rustc_reservation_impl, + rustc_scalable_vector, rustc_serialize, rustc_skip_during_method_dispatch, rustc_specialization_trait, diff --git a/tests/ui/scalable-vectors/invalid.rs b/tests/ui/scalable-vectors/invalid.rs new file mode 100644 index 0000000000000..7b126cb790734 --- /dev/null +++ b/tests/ui/scalable-vectors/invalid.rs @@ -0,0 +1,158 @@ +//@ edition: 2024 +#![allow(internal_features, unused_imports, unused_macros)] +#![feature(extern_types)] +#![feature(gen_blocks)] +#![feature(rustc_attrs)] +#![feature(stmt_expr_attributes)] +#![feature(trait_alias)] + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +extern crate std as other_std; + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +use std::vec::Vec; + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +static _X: u32 = 0; + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +const _Y: u32 = 0; + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +mod bar { +} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +unsafe extern "C" { + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + static X: &'static u32; + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + type Y; + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + fn foo(); +} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +type Foo = u32; + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +enum Bar<#[rustc_scalable_vector(4)] T> { +//~^ ERROR: attribute should be applied to a struct + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + Baz(std::marker::PhantomData), +} + +struct Qux { + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + field: u32, +} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +union FooBar { + x: u32, + y: u32, +} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +trait FooBaz { + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + type Foo; + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + const Bar: i32; + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + fn foo() {} +} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +trait FooQux = FooBaz; + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +impl Bar { + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + fn foo() {} +} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +impl FooBaz for Bar { + type Foo = u32; + const Bar: i32 = 3; +} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +macro_rules! 
barqux { ($foo:tt) => { $foo }; } + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +fn barqux(#[rustc_scalable_vector(4)] _x: u32) {} +//~^ ERROR: attribute should be applied to a struct +//~^^ ERROR: allow, cfg, cfg_attr, deny, expect, forbid, and warn are the only allowed built-in attributes in function parameters + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +async fn async_foo() {} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +gen fn gen_foo() {} + +#[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct +async gen fn async_gen_foo() {} + +fn main() { + let _x = #[rustc_scalable_vector(4)] || { }; +//~^ ERROR: attribute should be applied to a struct + let _y = #[rustc_scalable_vector(4)] 3 + 4; +//~^ ERROR: attribute should be applied to a struct + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + let _z = 3; + + match _z { + #[rustc_scalable_vector(4)] +//~^ ERROR: attribute should be applied to a struct + 1 => (), + _ => (), + } +} + +#[rustc_scalable_vector("4")] +//~^ ERROR: malformed `rustc_scalable_vector` attribute input +struct ArgNotLit(f32); + +#[rustc_scalable_vector(4, 2)] +//~^ ERROR: malformed `rustc_scalable_vector` attribute input +struct ArgMultipleLits(f32); + +#[rustc_scalable_vector(count = "4")] +//~^ ERROR: malformed `rustc_scalable_vector` attribute input +struct ArgKind(f32); + +#[rustc_scalable_vector(4)] +struct Okay(f32); + +#[rustc_scalable_vector] +struct OkayNoArg(f32); diff --git a/tests/ui/scalable-vectors/invalid.stderr b/tests/ui/scalable-vectors/invalid.stderr new file mode 100644 index 0000000000000..5010d6a8ec236 --- /dev/null +++ b/tests/ui/scalable-vectors/invalid.stderr @@ -0,0 +1,375 @@ +error: allow, cfg, cfg_attr, deny, expect, forbid, and warn are the only allowed built-in attributes in function parameters + --> $DIR/invalid.rs:109:11 + | +LL | fn barqux(#[rustc_scalable_vector(4)] _x: u32) {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0539]: malformed `rustc_scalable_vector` attribute input + --> $DIR/invalid.rs:142:1 + | +LL | #[rustc_scalable_vector("4")] + | ^^^^^^^^^^^^^^^^^^^^^^^^---^^ + | | + | expected an integer literal here + | +help: try changing it to one of the following valid forms of the attribute + | +LL - #[rustc_scalable_vector("4")] +LL + #[rustc_scalable_vector(count)] + | +LL - #[rustc_scalable_vector("4")] +LL + #[rustc_scalable_vector] + | + +error[E0805]: malformed `rustc_scalable_vector` attribute input + --> $DIR/invalid.rs:146:1 + | +LL | #[rustc_scalable_vector(4, 2)] + | ^^^^^^^^^^^^^^^^^^^^^^^------^ + | | + | expected a single argument here + | +help: try changing it to one of the following valid forms of the attribute + | +LL - #[rustc_scalable_vector(4, 2)] +LL + #[rustc_scalable_vector(count)] + | +LL - #[rustc_scalable_vector(4, 2)] +LL + #[rustc_scalable_vector] + | + +error[E0539]: malformed `rustc_scalable_vector` attribute input + --> $DIR/invalid.rs:150:1 + | +LL | #[rustc_scalable_vector(count = "4")] + | ^^^^^^^^^^^^^^^^^^^^^^^^-----------^^ + | | + | expected an integer literal here + | +help: try changing it to one of the following valid forms of the attribute + | +LL - #[rustc_scalable_vector(count = "4")] +LL + #[rustc_scalable_vector(count)] + | +LL - #[rustc_scalable_vector(count = "4")] +LL + #[rustc_scalable_vector] + | + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:9:1 + | +LL | 
#[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | extern crate std as other_std; + | ------------------------------ not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:13:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | use std::vec::Vec; + | ------------------ not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:17:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | static _X: u32 = 0; + | ------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:21:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | const _Y: u32 = 0; + | ------------------ not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:25:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | / mod bar { +LL | | } + | |_- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:30:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | / unsafe extern "C" { +LL | | #[rustc_scalable_vector(4)] +LL | | +LL | | static X: &'static u32; +... | +LL | | fn foo(); +LL | | } + | |_- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:44:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | type Foo = u32; + | --------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:48:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | / enum Bar<#[rustc_scalable_vector(4)] T> { +LL | | +LL | | #[rustc_scalable_vector(4)] +... | +LL | | } + | |_- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:50:10 + | +LL | enum Bar<#[rustc_scalable_vector(4)] T> { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:52:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | Baz(std::marker::PhantomData), + | -------------------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:58:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | field: u32, + | ---------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:63:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | / union FooBar { +LL | | x: u32, +LL | | y: u32, +LL | | } + | |_- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:70:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | / trait FooBaz { +LL | | #[rustc_scalable_vector(4)] +LL | | +LL | | type Foo; +... 
| +LL | | fn foo() {} +LL | | } + | |_- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:84:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | trait FooQux = FooBaz; + | ---------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:88:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | / impl Bar { +LL | | #[rustc_scalable_vector(4)] +LL | | +LL | | fn foo() {} +LL | | } + | |_- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:96:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | / impl FooBaz for Bar { +LL | | type Foo = u32; +LL | | const Bar: i32 = 3; +LL | | } + | |_- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:103:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | macro_rules! barqux { ($foo:tt) => { $foo }; } + | ---------------------------------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:107:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | fn barqux(#[rustc_scalable_vector(4)] _x: u32) {} + | ------------------------------------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:109:11 + | +LL | fn barqux(#[rustc_scalable_vector(4)] _x: u32) {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^-------- + | | + | not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:113:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | async fn async_foo() {} + | ----------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:117:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | gen fn gen_foo() {} + | ------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:121:1 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | async gen fn async_gen_foo() {} + | ------------------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:126:14 + | +LL | let _x = #[rustc_scalable_vector(4)] || { }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ ------ not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:128:14 + | +LL | let _y = #[rustc_scalable_vector(4)] 3 + 4; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:130:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | let _z = 3; + | ----------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:135:9 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | 1 => (), + | ------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:73:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | type Foo; + | --------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:76:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | const Bar: i32; + | --------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:79:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
+LL | +LL | fn foo() {} + | ----------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:91:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | fn foo() {} + | ----------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:33:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | static X: &'static u32; + | ----------------------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:36:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | type Y; + | ------- not a struct + +error: attribute should be applied to a struct + --> $DIR/invalid.rs:39:5 + | +LL | #[rustc_scalable_vector(4)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +LL | +LL | fn foo(); + | --------- not a struct + +error: aborting due to 37 previous errors + +Some errors have detailed explanations: E0539, E0805. +For more information about an error, try `rustc --explain E0539`. diff --git a/tests/ui/stats/input-stats.stderr b/tests/ui/stats/input-stats.stderr index 72a9820bb6431..f6ed3fcf4c643 100644 --- a/tests/ui/stats/input-stats.stderr +++ b/tests/ui/stats/input-stats.stderr @@ -93,7 +93,7 @@ hir-stats - Binding 216 (NN.N%) 3 hir-stats Block 288 (NN.N%) 6 48 hir-stats GenericBound 256 (NN.N%) 4 64 hir-stats - Trait 256 (NN.N%) 4 -hir-stats Attribute 200 (NN.N%) 5 40 +hir-stats Attribute 240 (NN.N%) 5 48 hir-stats Variant 144 (NN.N%) 2 72 hir-stats GenericArgs 144 (NN.N%) 3 48 hir-stats FieldDef 128 (NN.N%) 2 64 @@ -119,5 +119,5 @@ hir-stats TraitItemId 8 (NN.N%) 2 4 hir-stats ImplItemId 8 (NN.N%) 2 4 hir-stats ForeignItemId 4 (NN.N%) 1 4 hir-stats ---------------------------------------------------------------- -hir-stats Total 8_584 173 +hir-stats Total 8_624 173 hir-stats ================================================================ diff --git a/tests/ui/thir-print/thir-tree-match.stdout b/tests/ui/thir-print/thir-tree-match.stdout index 2049c531abd99..f95f7e3e5ae5a 100644 --- a/tests/ui/thir-print/thir-tree-match.stdout +++ b/tests/ui/thir-print/thir-tree-match.stdout @@ -94,7 +94,7 @@ body: did: DefId(0:10 ~ thir_tree_match[fcf8]::Foo) variants: [VariantDef { def_id: DefId(0:11 ~ thir_tree_match[fcf8]::Foo::FooOne), ctor: Some((Fn, DefId(0:12 ~ thir_tree_match[fcf8]::Foo::FooOne::{constructor#0}))), name: "FooOne", discr: Relative(0), fields: [FieldDef { did: DefId(0:13 ~ thir_tree_match[fcf8]::Foo::FooOne::0), name: "0", vis: Restricted(DefId(0:0 ~ thir_tree_match[fcf8])), safety: Safe, value: None }], tainted: None, flags: }, VariantDef { def_id: DefId(0:14 ~ thir_tree_match[fcf8]::Foo::FooTwo), ctor: Some((Const, DefId(0:15 ~ thir_tree_match[fcf8]::Foo::FooTwo::{constructor#0}))), name: "FooTwo", discr: Relative(1), fields: [], tainted: None, flags: }] flags: IS_ENUM - repr: ReprOptions { int: None, align: None, pack: None, flags: , field_shuffle_seed: 13397682652773712997 } + repr: ReprOptions { int: None, align: None, pack: None, flags: , scalable: None, field_shuffle_seed: 13397682652773712997 } args: [] variant_index: 0 subpatterns: [ @@ -108,7 +108,7 @@ body: did: DefId(0:3 ~ thir_tree_match[fcf8]::Bar) variants: [VariantDef { def_id: DefId(0:4 ~ thir_tree_match[fcf8]::Bar::First), ctor: Some((Const, DefId(0:5 ~ thir_tree_match[fcf8]::Bar::First::{constructor#0}))), name: "First", discr: Relative(0), fields: [], tainted: None, flags: }, VariantDef { def_id: DefId(0:6 ~ thir_tree_match[fcf8]::Bar::Second), ctor: Some((Const, 
DefId(0:7 ~ thir_tree_match[fcf8]::Bar::Second::{constructor#0}))), name: "Second", discr: Relative(1), fields: [], tainted: None, flags: }, VariantDef { def_id: DefId(0:8 ~ thir_tree_match[fcf8]::Bar::Third), ctor: Some((Const, DefId(0:9 ~ thir_tree_match[fcf8]::Bar::Third::{constructor#0}))), name: "Third", discr: Relative(2), fields: [], tainted: None, flags: }] flags: IS_ENUM - repr: ReprOptions { int: None, align: None, pack: None, flags: , field_shuffle_seed: 7908585036048874241 } + repr: ReprOptions { int: None, align: None, pack: None, flags: , scalable: None, field_shuffle_seed: 7908585036048874241 } args: [] variant_index: 0 subpatterns: [] @@ -156,7 +156,7 @@ body: did: DefId(0:10 ~ thir_tree_match[fcf8]::Foo) variants: [VariantDef { def_id: DefId(0:11 ~ thir_tree_match[fcf8]::Foo::FooOne), ctor: Some((Fn, DefId(0:12 ~ thir_tree_match[fcf8]::Foo::FooOne::{constructor#0}))), name: "FooOne", discr: Relative(0), fields: [FieldDef { did: DefId(0:13 ~ thir_tree_match[fcf8]::Foo::FooOne::0), name: "0", vis: Restricted(DefId(0:0 ~ thir_tree_match[fcf8])), safety: Safe, value: None }], tainted: None, flags: }, VariantDef { def_id: DefId(0:14 ~ thir_tree_match[fcf8]::Foo::FooTwo), ctor: Some((Const, DefId(0:15 ~ thir_tree_match[fcf8]::Foo::FooTwo::{constructor#0}))), name: "FooTwo", discr: Relative(1), fields: [], tainted: None, flags: }] flags: IS_ENUM - repr: ReprOptions { int: None, align: None, pack: None, flags: , field_shuffle_seed: 13397682652773712997 } + repr: ReprOptions { int: None, align: None, pack: None, flags: , scalable: None, field_shuffle_seed: 13397682652773712997 } args: [] variant_index: 0 subpatterns: [ @@ -208,7 +208,7 @@ body: did: DefId(0:10 ~ thir_tree_match[fcf8]::Foo) variants: [VariantDef { def_id: DefId(0:11 ~ thir_tree_match[fcf8]::Foo::FooOne), ctor: Some((Fn, DefId(0:12 ~ thir_tree_match[fcf8]::Foo::FooOne::{constructor#0}))), name: "FooOne", discr: Relative(0), fields: [FieldDef { did: DefId(0:13 ~ thir_tree_match[fcf8]::Foo::FooOne::0), name: "0", vis: Restricted(DefId(0:0 ~ thir_tree_match[fcf8])), safety: Safe, value: None }], tainted: None, flags: }, VariantDef { def_id: DefId(0:14 ~ thir_tree_match[fcf8]::Foo::FooTwo), ctor: Some((Const, DefId(0:15 ~ thir_tree_match[fcf8]::Foo::FooTwo::{constructor#0}))), name: "FooTwo", discr: Relative(1), fields: [], tainted: None, flags: }] flags: IS_ENUM - repr: ReprOptions { int: None, align: None, pack: None, flags: , field_shuffle_seed: 13397682652773712997 } + repr: ReprOptions { int: None, align: None, pack: None, flags: , scalable: None, field_shuffle_seed: 13397682652773712997 } args: [] variant_index: 1 subpatterns: [] From f95a35449ee4ec38f7f6af0eef3fbb26bdef725c Mon Sep 17 00:00:00 2001 From: David Wood Date: Wed, 9 Jul 2025 16:25:29 +0000 Subject: [PATCH 02/27] hir/trait_sel: prohibit scalable vectors in types Extend well-formedness checking and HIR analysis to prohibit the use of scalable vectors in structs, enums, unions, tuples and arrays. LLVM does not support scalable vectors being members of other types, so these restrictions are necessary. 
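The prohibited positions are exercised by the `illformed-within-types.rs` test below; roughly (struct names are placeholders, and the scalable type requires `#![feature(rustc_attrs)]` with this series applied):

    #[rustc_scalable_vector(2)]
    struct ScalableI64(i64);      // defining the scalable vector type itself is fine

    struct Wrapper {
        x: ScalableI64,           // error: scalable vectors cannot be fields of a struct
        in_tuple: (ScalableI64,), // error: scalable vectors cannot be tuple fields
    }

The one exception is a tuple struct that itself carries `#[rustc_scalable_vector]` without an element count, which `check_type_defn` exempts via `ScalableElt::Container`.
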
Co-authored-by: Jamie Cunliffe --- compiler/rustc_ast_passes/messages.ftl | 2 + .../rustc_ast_passes/src/ast_validation.rs | 8 ++ compiler/rustc_ast_passes/src/errors.rs | 7 + .../rustc_hir_analysis/src/check/check.rs | 83 +++++++++++- .../rustc_hir_analysis/src/check/wfcheck.rs | 18 ++- .../rustc_trait_selection/src/traits/wf.rs | 18 ++- .../illformed-element-type.rs | 93 +++++++++++++ .../illformed-element-type.stderr | 122 +++++++++++++++++ .../illformed-tuples-of-scalable-vectors.rs | 25 ++++ ...llformed-tuples-of-scalable-vectors.stderr | 26 ++++ .../illformed-within-types.rs | 23 ++++ .../illformed-within-types.stderr | 32 +++++ tests/ui/scalable-vectors/illformed.rs | 59 ++++++++ tests/ui/scalable-vectors/illformed.stderr | 126 ++++++++++++++++++ .../ui/scalable-vectors/wellformed-arrays.rs | 10 ++ tests/ui/scalable-vectors/wellformed.rs | 48 +++++++ 16 files changed, 695 insertions(+), 5 deletions(-) create mode 100644 tests/ui/scalable-vectors/illformed-element-type.rs create mode 100644 tests/ui/scalable-vectors/illformed-element-type.stderr create mode 100644 tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.rs create mode 100644 tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.stderr create mode 100644 tests/ui/scalable-vectors/illformed-within-types.rs create mode 100644 tests/ui/scalable-vectors/illformed-within-types.stderr create mode 100644 tests/ui/scalable-vectors/illformed.rs create mode 100644 tests/ui/scalable-vectors/illformed.stderr create mode 100644 tests/ui/scalable-vectors/wellformed-arrays.rs create mode 100644 tests/ui/scalable-vectors/wellformed.rs diff --git a/compiler/rustc_ast_passes/messages.ftl b/compiler/rustc_ast_passes/messages.ftl index 53e64439afc66..468bf35f6f4ec 100644 --- a/compiler/rustc_ast_passes/messages.ftl +++ b/compiler/rustc_ast_passes/messages.ftl @@ -228,6 +228,8 @@ ast_passes_precise_capturing_duplicated = duplicate `use<...>` precise capturing ast_passes_precise_capturing_not_allowed_here = `use<...>` precise capturing syntax not allowed in {$loc} +ast_passes_scalable_vector_not_tuple_struct = scalable vectors must be tuple structs + ast_passes_static_without_body = free static item without body .suggestion = provide a definition for the static diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs index 9d3b0969ef35d..3711204d46317 100644 --- a/compiler/rustc_ast_passes/src/ast_validation.rs +++ b/compiler/rustc_ast_passes/src/ast_validation.rs @@ -1182,6 +1182,14 @@ impl<'a> Visitor<'a> for AstValidator<'a> { } ItemKind::Struct(ident, generics, vdata) => { self.with_tilde_const(Some(TildeConstReason::Struct { span: item.span }), |this| { + // Scalable vectors can only be tuple structs + let is_scalable_vector = + item.attrs.iter().any(|attr| attr.has_name(sym::rustc_scalable_vector)); + if is_scalable_vector && !matches!(vdata, VariantData::Tuple(..)) { + this.dcx() + .emit_err(errors::ScalableVectorNotTupleStruct { span: item.span }); + } + match vdata { VariantData::Struct { fields, .. 
} => { this.visit_attrs_vis_ident(&item.attrs, &item.vis, ident); diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs index 60f47490f12a6..0d071c271edf7 100644 --- a/compiler/rustc_ast_passes/src/errors.rs +++ b/compiler/rustc_ast_passes/src/errors.rs @@ -907,3 +907,10 @@ pub(crate) struct AbiMustNotHaveReturnType { pub span: Span, pub abi: ExternAbi, } + +#[derive(Diagnostic)] +#[diag(ast_passes_scalable_vector_not_tuple_struct)] +pub(crate) struct ScalableVectorNotTupleStruct { + #[primary_span] + pub span: Span, +} diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs index 161a8566b04f2..3868870a77553 100644 --- a/compiler/rustc_hir_analysis/src/check/check.rs +++ b/compiler/rustc_hir_analysis/src/check/check.rs @@ -1,7 +1,7 @@ use std::cell::LazyCell; use std::ops::ControlFlow; -use rustc_abi::{ExternAbi, FieldIdx}; +use rustc_abi::{ExternAbi, FieldIdx, ScalableElt}; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_errors::codes::*; use rustc_errors::{EmissionGuarantee, MultiSpan}; @@ -94,7 +94,9 @@ fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId) { let span = tcx.def_span(def_id); def.destructor(tcx); // force the destructor to be evaluated - if def.repr().simd() { + if let Some(scalable) = def.repr().scalable { + check_scalable_vector(tcx, span, def_id, scalable); + } else if def.repr().simd() { check_simd(tcx, span, def_id); } @@ -1398,6 +1400,83 @@ fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) { } } +#[tracing::instrument(skip(tcx), level = "debug")] +fn check_scalable_vector(tcx: TyCtxt<'_>, span: Span, def_id: LocalDefId, scalable: ScalableElt) { + let ty = tcx.type_of(def_id).instantiate_identity(); + let ty::Adt(def, args) = ty.kind() else { return }; + if !def.is_struct() { + tcx.dcx().delayed_bug("`rustc_scalable_vector` applied to non-struct"); + return; + } + + let fields = &def.non_enum_variant().fields; + match scalable { + ScalableElt::ElementCount(..) if fields.is_empty() => { + let mut err = + tcx.dcx().struct_span_err(span, "scalable vectors must have a single field"); + err.help("scalable vector types' only field must be a primitive scalar type"); + err.emit(); + return; + } + ScalableElt::ElementCount(..) if fields.len() >= 2 => { + tcx.dcx().struct_span_err(span, "scalable vectors cannot have multiple fields").emit(); + return; + } + ScalableElt::Container if fields.is_empty() => { + let mut err = + tcx.dcx().struct_span_err(span, "scalable vectors must have a single field"); + err.help("tuples of scalable vectors can only contain multiple of the same scalable vector type"); + err.emit(); + return; + } + _ => {} + } + + match scalable { + ScalableElt::ElementCount(..) 
=> { + let element_ty = &fields[FieldIdx::ZERO].ty(tcx, args); + + // Check that `element_ty` only uses types valid in the lanes of a scalable vector + // register: scalar types which directly match a "machine" type - integers, floats and + // bools + match element_ty.kind() { + ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Bool => (), + _ => { + let mut err = tcx.dcx().struct_span_err( + span, + "element type of a scalable vector must be a primitive scalar", + ); + err.help( + "only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted", + ); + err.emit(); + } + } + } + ScalableElt::Container => { + let mut prev_field_ty = None; + for field in fields.iter() { + let element_ty = field.ty(tcx, args); + if let ty::Adt(def, _) = element_ty.kind() + && !def.repr().scalable() + { + tcx.dcx().span_err( + tcx.def_span(field.did), + "scalable vector structs can only have scalable vector fields", + ); + } else if let Some(prev_ty) = prev_field_ty.replace(element_ty) + && prev_ty != element_ty + { + tcx.dcx().span_err( + tcx.def_span(field.did), + "all fields in a scalable vector struct must be the same type", + ); + } + } + } + } +} + pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) { let repr = def.repr(); if repr.packed() { diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs index a62efed13bc79..dcc2e12e15046 100644 --- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs +++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs @@ -2,7 +2,7 @@ use std::cell::LazyCell; use std::ops::{ControlFlow, Deref}; use hir::intravisit::{self, Visitor}; -use rustc_abi::ExternAbi; +use rustc_abi::{ExternAbi, ScalableElt}; use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet}; use rustc_errors::codes::*; use rustc_errors::{Applicability, ErrorGuaranteed, pluralize, struct_span_code_err}; @@ -1039,7 +1039,21 @@ fn check_type_defn<'tcx>( hir_ty.span, Some(WellFormedLoc::Ty(field_id)), ty.into(), - ) + ); + + if matches!(ty.kind(), ty::Adt(def, _) if def.repr().scalable()) + && !matches!(adt_def.repr().scalable, Some(ScalableElt::Container)) + { + // Scalable vectors can only be fields of structs if the type has an + // `rustc_scalable_vector` attribute w/out specifying an element count + tcx.dcx().span_err( + hir_ty.span, + format!( + "scalable vectors cannot be fields of a {}", + adt_def.variant_descr() + ), + ); + } } // For DST, or when drop needs to copy things around, all diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs index adce9850b594c..dd57be4e918e4 100644 --- a/compiler/rustc_trait_selection/src/traits/wf.rs +++ b/compiler/rustc_trait_selection/src/traits/wf.rs @@ -770,9 +770,25 @@ impl<'a, 'tcx> TypeVisitor> for WfPredicates<'a, 'tcx> { } ty::Tuple(tys) => { - if let Some((_last, rest)) = tys.split_last() { + if let Some((last, rest)) = tys.split_last() { for &elem in rest { self.require_sized(elem, ObligationCauseCode::TupleElem); + if elem.is_scalable_vector() && !self.span.is_dummy() { + self.tcx() + .dcx() + .struct_span_err( + self.span, + "scalable vectors cannot be tuple fields", + ) + .emit(); + } + } + + if last.is_scalable_vector() && !self.span.is_dummy() { + self.tcx() + .dcx() + .struct_span_err(self.span, "scalable vectors cannot be tuple fields") + .emit(); } } } diff --git a/tests/ui/scalable-vectors/illformed-element-type.rs b/tests/ui/scalable-vectors/illformed-element-type.rs new file mode 100644 index 
0000000000000..469ca006f5e9b --- /dev/null +++ b/tests/ui/scalable-vectors/illformed-element-type.rs @@ -0,0 +1,93 @@ +//@ compile-flags: --crate-type=lib +#![allow(internal_features)] +#![feature(extern_types)] +#![feature(never_type)] +#![feature(rustc_attrs)] + +struct Foo; +enum Bar {} +union Baz { x: u16 } +extern "C" { + type Qux; +} + +#[rustc_scalable_vector(4)] +struct TyChar(char); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(2)] +struct TyConstPtr(*const u8); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(2)] +struct TyMutPtr(*mut u8); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyStruct(Foo); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyEnum(Bar); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyUnion(Baz); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyForeign(Qux); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyArray([u32; 4]); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TySlice([u32]); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyRef<'a>(&'a u32); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyFnPtr(fn(u32) -> u32); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyDyn(dyn std::io::Write); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyNever(!); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +#[rustc_scalable_vector(4)] +struct TyTuple((u32, u32)); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +type ValidAlias = u32; +type InvalidAlias = String; + +#[rustc_scalable_vector(4)] +struct TyValidAlias(ValidAlias); + +#[rustc_scalable_vector(4)] +struct TyInvalidAlias(InvalidAlias); +//~^ ERROR: element type of a scalable vector must be a primitive scalar + +trait Tr { + type Valid; + type Invalid; +} + +impl Tr for () { + type Valid = u32; + type Invalid = String; +} + +struct TyValidProjection(<() as Tr>::Valid); + +struct TyInvalidProjection(<() as Tr>::Invalid); +// FIXME: element type of a scalable vector must be a primitive scalar diff --git a/tests/ui/scalable-vectors/illformed-element-type.stderr b/tests/ui/scalable-vectors/illformed-element-type.stderr new file mode 100644 index 0000000000000..2d059ed2e7a8c --- /dev/null +++ b/tests/ui/scalable-vectors/illformed-element-type.stderr @@ -0,0 +1,122 @@ +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:15:1 + | +LL | struct TyChar(char); + | ^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:19:1 + | +LL | struct TyConstPtr(*const u8); + | ^^^^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector 
must be a primitive scalar + --> $DIR/illformed-element-type.rs:23:1 + | +LL | struct TyMutPtr(*mut u8); + | ^^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:27:1 + | +LL | struct TyStruct(Foo); + | ^^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:31:1 + | +LL | struct TyEnum(Bar); + | ^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:35:1 + | +LL | struct TyUnion(Baz); + | ^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:39:1 + | +LL | struct TyForeign(Qux); + | ^^^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:43:1 + | +LL | struct TyArray([u32; 4]); + | ^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:47:1 + | +LL | struct TySlice([u32]); + | ^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:51:1 + | +LL | struct TyRef<'a>(&'a u32); + | ^^^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:55:1 + | +LL | struct TyFnPtr(fn(u32) -> u32); + | ^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:59:1 + | +LL | struct TyDyn(dyn std::io::Write); + | ^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:63:1 + | +LL | struct TyNever(!); + | ^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:67:1 + | +LL | struct TyTuple((u32, u32)); + | ^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: element type of a scalable vector must be a primitive scalar + --> $DIR/illformed-element-type.rs:77:1 + | +LL | struct TyInvalidAlias(InvalidAlias); + | ^^^^^^^^^^^^^^^^^^^^^ + | + = help: only `u*`, `i*`, `f*`, `*const`, `*mut` and `bool` types are accepted + +error: aborting due to 15 previous errors + diff --git a/tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.rs b/tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.rs new file mode 100644 index 0000000000000..ef5a1f0a4f3ff --- /dev/null +++ 
b/tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.rs @@ -0,0 +1,25 @@ +//@ compile-flags: --crate-type=lib +#![allow(internal_features)] +#![feature(rustc_attrs)] + +#[rustc_scalable_vector(2)] +struct ValidI64(i64); + +#[rustc_scalable_vector(4)] +struct ValidI32(i32); + +#[rustc_scalable_vector] +struct Struct { x: ValidI64, y: ValidI64 } +//~^ ERROR: scalable vectors must be tuple structs + +#[rustc_scalable_vector] +struct DifferentVectorTypes(ValidI64, ValidI32); +//~^ ERROR: all fields in a scalable vector struct must be the same type + +#[rustc_scalable_vector] +struct NonVectorTypes(u32, u64); +//~^ ERROR: all fields in a scalable vector struct must be the same type + +#[rustc_scalable_vector] +struct SomeVectorTypes(ValidI64, u64); +//~^ ERROR: all fields in a scalable vector struct must be the same type diff --git a/tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.stderr b/tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.stderr new file mode 100644 index 0000000000000..973cc024bc0dc --- /dev/null +++ b/tests/ui/scalable-vectors/illformed-tuples-of-scalable-vectors.stderr @@ -0,0 +1,26 @@ +error: scalable vectors must be tuple structs + --> $DIR/illformed-tuples-of-scalable-vectors.rs:12:1 + | +LL | struct Struct { x: ValidI64, y: ValidI64 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: all fields in a scalable vector struct must be the same type + --> $DIR/illformed-tuples-of-scalable-vectors.rs:16:39 + | +LL | struct DifferentVectorTypes(ValidI64, ValidI32); + | ^^^^^^^^ + +error: all fields in a scalable vector struct must be the same type + --> $DIR/illformed-tuples-of-scalable-vectors.rs:20:28 + | +LL | struct NonVectorTypes(u32, u64); + | ^^^ + +error: all fields in a scalable vector struct must be the same type + --> $DIR/illformed-tuples-of-scalable-vectors.rs:24:34 + | +LL | struct SomeVectorTypes(ValidI64, u64); + | ^^^ + +error: aborting due to 4 previous errors + diff --git a/tests/ui/scalable-vectors/illformed-within-types.rs b/tests/ui/scalable-vectors/illformed-within-types.rs new file mode 100644 index 0000000000000..81d960e4d4e1a --- /dev/null +++ b/tests/ui/scalable-vectors/illformed-within-types.rs @@ -0,0 +1,23 @@ +//@ compile-flags: --crate-type=lib +#![allow(internal_features)] +#![feature(rustc_attrs)] + +#[rustc_scalable_vector(2)] +struct ValidI64(i64); + +struct Struct { + x: ValidI64, +//~^ ERROR: scalable vectors cannot be fields of a struct + in_tuple: (ValidI64,), +//~^ ERROR: scalable vectors cannot be tuple fields +} + +struct TupleStruct(ValidI64); +//~^ ERROR: scalable vectors cannot be fields of a struct + +enum Enum { + StructVariant { _ty: ValidI64 }, +//~^ ERROR: scalable vectors cannot be fields of a variant + TupleVariant(ValidI64), +//~^ ERROR: scalable vectors cannot be fields of a variant +} diff --git a/tests/ui/scalable-vectors/illformed-within-types.stderr b/tests/ui/scalable-vectors/illformed-within-types.stderr new file mode 100644 index 0000000000000..e76ef26f2aa4b --- /dev/null +++ b/tests/ui/scalable-vectors/illformed-within-types.stderr @@ -0,0 +1,32 @@ +error: scalable vectors cannot be fields of a struct + --> $DIR/illformed-within-types.rs:9:8 + | +LL | x: ValidI64, + | ^^^^^^^^ + +error: scalable vectors cannot be tuple fields + --> $DIR/illformed-within-types.rs:11:15 + | +LL | in_tuple: (ValidI64,), + | ^^^^^^^^^^^ + +error: scalable vectors cannot be fields of a struct + --> $DIR/illformed-within-types.rs:15:20 + | +LL | struct TupleStruct(ValidI64); + | ^^^^^^^^ + 
+error: scalable vectors cannot be fields of a variant + --> $DIR/illformed-within-types.rs:19:26 + | +LL | StructVariant { _ty: ValidI64 }, + | ^^^^^^^^ + +error: scalable vectors cannot be fields of a variant + --> $DIR/illformed-within-types.rs:21:18 + | +LL | TupleVariant(ValidI64), + | ^^^^^^^^ + +error: aborting due to 5 previous errors + diff --git a/tests/ui/scalable-vectors/illformed.rs b/tests/ui/scalable-vectors/illformed.rs new file mode 100644 index 0000000000000..5af66ad6c7390 --- /dev/null +++ b/tests/ui/scalable-vectors/illformed.rs @@ -0,0 +1,59 @@ +//@ compile-flags: --crate-type=lib +#![allow(internal_features)] +#![feature(rustc_attrs)] + +#[rustc_scalable_vector(4)] +struct NoFieldsStructWithElementCount {} +//~^ ERROR: scalable vectors must have a single field +//~^^ ERROR: scalable vectors must be tuple structs + +#[rustc_scalable_vector(4)] +struct NoFieldsTupleWithElementCount(); +//~^ ERROR: scalable vectors must have a single field + +#[rustc_scalable_vector(4)] +struct NoFieldsUnitWithElementCount; +//~^ ERROR: scalable vectors must have a single field +//~^^ ERROR: scalable vectors must be tuple structs + +#[rustc_scalable_vector] +struct NoFieldsStructWithoutElementCount {} +//~^ ERROR: scalable vectors must have a single field +//~^^ ERROR: scalable vectors must be tuple structs + +#[rustc_scalable_vector] +struct NoFieldsTupleWithoutElementCount(); +//~^ ERROR: scalable vectors must have a single field + +#[rustc_scalable_vector] +struct NoFieldsUnitWithoutElementCount; +//~^ ERROR: scalable vectors must have a single field +//~^^ ERROR: scalable vectors must be tuple structs + +#[rustc_scalable_vector(4)] +struct MultipleFieldsStructWithElementCount { +//~^ ERROR: scalable vectors cannot have multiple fields +//~^^ ERROR: scalable vectors must be tuple structs + _ty: f32, + other: u32, +} + +#[rustc_scalable_vector(4)] +struct MultipleFieldsTupleWithElementCount(f32, u32); +//~^ ERROR: scalable vectors cannot have multiple fields + +#[rustc_scalable_vector] +struct MultipleFieldsStructWithoutElementCount { +//~^ ERROR: scalable vectors must be tuple structs + _ty: f32, + other: u32, +//~^ ERROR: all fields in a scalable vector struct must be the same type +} + +#[rustc_scalable_vector] +struct MultipleFieldsTupleWithoutElementCount(f32, u32); +//~^ ERROR: all fields in a scalable vector struct must be the same type + +#[rustc_scalable_vector(2)] +struct SingleFieldStruct { _ty: f64 } +//~^ ERROR: scalable vectors must be tuple structs diff --git a/tests/ui/scalable-vectors/illformed.stderr b/tests/ui/scalable-vectors/illformed.stderr new file mode 100644 index 0000000000000..92b115664d1c9 --- /dev/null +++ b/tests/ui/scalable-vectors/illformed.stderr @@ -0,0 +1,126 @@ +error: scalable vectors must be tuple structs + --> $DIR/illformed.rs:6:1 + | +LL | struct NoFieldsStructWithElementCount {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: scalable vectors must be tuple structs + --> $DIR/illformed.rs:15:1 + | +LL | struct NoFieldsUnitWithElementCount; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: scalable vectors must be tuple structs + --> $DIR/illformed.rs:20:1 + | +LL | struct NoFieldsStructWithoutElementCount {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: scalable vectors must be tuple structs + --> $DIR/illformed.rs:29:1 + | +LL | struct NoFieldsUnitWithoutElementCount; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: scalable vectors must be tuple structs + --> $DIR/illformed.rs:34:1 + | +LL | / struct 
MultipleFieldsStructWithElementCount { +LL | | +LL | | +LL | | _ty: f32, +LL | | other: u32, +LL | | } + | |_^ + +error: scalable vectors must be tuple structs + --> $DIR/illformed.rs:46:1 + | +LL | / struct MultipleFieldsStructWithoutElementCount { +LL | | +LL | | _ty: f32, +LL | | other: u32, +LL | | +LL | | } + | |_^ + +error: scalable vectors must be tuple structs + --> $DIR/illformed.rs:58:1 + | +LL | struct SingleFieldStruct { _ty: f64 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: scalable vectors must have a single field + --> $DIR/illformed.rs:6:1 + | +LL | struct NoFieldsStructWithElementCount {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = help: scalable vector types' only field must be a primitive scalar type + +error: scalable vectors must have a single field + --> $DIR/illformed.rs:11:1 + | +LL | struct NoFieldsTupleWithElementCount(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = help: scalable vector types' only field must be a primitive scalar type + +error: scalable vectors must have a single field + --> $DIR/illformed.rs:15:1 + | +LL | struct NoFieldsUnitWithElementCount; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = help: scalable vector types' only field must be a primitive scalar type + +error: scalable vectors must have a single field + --> $DIR/illformed.rs:20:1 + | +LL | struct NoFieldsStructWithoutElementCount {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = help: tuples of scalable vectors can only contain multiple of the same scalable vector type + +error: scalable vectors must have a single field + --> $DIR/illformed.rs:25:1 + | +LL | struct NoFieldsTupleWithoutElementCount(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = help: tuples of scalable vectors can only contain multiple of the same scalable vector type + +error: scalable vectors must have a single field + --> $DIR/illformed.rs:29:1 + | +LL | struct NoFieldsUnitWithoutElementCount; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = help: tuples of scalable vectors can only contain multiple of the same scalable vector type + +error: scalable vectors cannot have multiple fields + --> $DIR/illformed.rs:34:1 + | +LL | struct MultipleFieldsStructWithElementCount { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: scalable vectors cannot have multiple fields + --> $DIR/illformed.rs:42:1 + | +LL | struct MultipleFieldsTupleWithElementCount(f32, u32); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: all fields in a scalable vector struct must be the same type + --> $DIR/illformed.rs:49:5 + | +LL | other: u32, + | ^^^^^^^^^^ + +error: all fields in a scalable vector struct must be the same type + --> $DIR/illformed.rs:54:52 + | +LL | struct MultipleFieldsTupleWithoutElementCount(f32, u32); + | ^^^ + +error: aborting due to 17 previous errors + diff --git a/tests/ui/scalable-vectors/wellformed-arrays.rs b/tests/ui/scalable-vectors/wellformed-arrays.rs new file mode 100644 index 0000000000000..b8f0bf291eea4 --- /dev/null +++ b/tests/ui/scalable-vectors/wellformed-arrays.rs @@ -0,0 +1,10 @@ +//@ check-pass +//@ compile-flags: --crate-type=lib +#![feature(rustc_attrs)] + +#[rustc_scalable_vector(16)] +struct ScalableU8(u8); + +fn main() { + let x: [ScalableU8; 4] = todo!(); +} diff --git a/tests/ui/scalable-vectors/wellformed.rs b/tests/ui/scalable-vectors/wellformed.rs new file mode 100644 index 0000000000000..cb6a22d6c4338 --- /dev/null +++ b/tests/ui/scalable-vectors/wellformed.rs @@ -0,0 +1,48 @@ +//@ check-pass +//@ compile-flags: --crate-type=lib 
+#![feature(rustc_attrs)] + +#[rustc_scalable_vector(16)] +struct ScalableU8(u8); + +#[rustc_scalable_vector(8)] +struct ScalableU16(u16); + +#[rustc_scalable_vector(4)] +struct ScalableU32(u32); + +#[rustc_scalable_vector(2)] +struct ScalableU64(u64); + +#[rustc_scalable_vector(1)] +struct ScalableU128(u128); + +#[rustc_scalable_vector(16)] +struct ScalableI8(i8); + +#[rustc_scalable_vector(8)] +struct ScalableI16(i16); + +#[rustc_scalable_vector(4)] +struct ScalableI32(i32); + +#[rustc_scalable_vector(2)] +struct ScalableI64(i64); + +#[rustc_scalable_vector(1)] +struct ScalableI128(i128); + +#[rustc_scalable_vector(8)] +struct ScalableF16(f32); + +#[rustc_scalable_vector(4)] +struct ScalableF32(f32); + +#[rustc_scalable_vector(2)] +struct ScalableF64(f64); + +#[rustc_scalable_vector(16)] +struct ScalableBool(bool); + +#[rustc_scalable_vector] +struct ScalableTuple(ScalableU8, ScalableU8, ScalableU8); From 12342ef80d6a51dbb0f09907377f7bab2104cfc1 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 8 Jul 2025 09:12:26 +0000 Subject: [PATCH 03/27] core: add `simd_reinterpret` `simd_reinterpret` is a replacement for `transmute`, specifically for use with scalable SIMD types. It is used in the tests for scalable vectors and in stdarch. Co-authored-by: Jamie Cunliffe --- compiler/rustc_codegen_llvm/src/intrinsic.rs | 12 ++++++++++++ compiler/rustc_hir_analysis/src/check/intrinsic.rs | 1 + compiler/rustc_span/src/symbol.rs | 1 + library/core/src/intrinsics/simd.rs | 5 +++++ 4 files changed, 19 insertions(+) diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 7b27e496986ae..fdf1f2af80889 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1236,6 +1236,18 @@ fn generic_simd_intrinsic<'ll, 'tcx>( return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } + if name == sym::simd_reinterpret { + require_simd!(ret_ty, SimdReturn); + + return Ok(match args[0].val { + OperandValue::Ref(PlaceValue { llval: val, .. }) | OperandValue::Immediate(val) => { + bx.bitcast(val, llret_ty) + } + OperandValue::ZeroSized => bx.const_undef(llret_ty), + OperandValue::Pair(_, _) => todo!(), + }); + } + // every intrinsic below takes a SIMD vector as its first argument let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput); let in_ty = args[0].layout.ty; diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index 4441dd6ebd66a..b939ca46ef5b7 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -621,6 +621,7 @@ pub(crate) fn check_intrinsic_type( } sym::simd_cast | sym::simd_as + | sym::simd_reinterpret | sym::simd_cast_ptr | sym::simd_expose_provenance | sym::simd_with_exposed_provenance => (2, 0, vec![param(0)], param(1)), diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index dd8a9e6b0678d..55f3e8350535b 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -2010,6 +2010,7 @@ symbols! 
{ simd_reduce_mul_unordered, simd_reduce_or, simd_reduce_xor, + simd_reinterpret, simd_relaxed_fma, simd_rem, simd_round, diff --git a/library/core/src/intrinsics/simd.rs b/library/core/src/intrinsics/simd.rs index 19488082cc33d..d36283f81bc7e 100644 --- a/library/core/src/intrinsics/simd.rs +++ b/library/core/src/intrinsics/simd.rs @@ -216,6 +216,11 @@ pub unsafe fn simd_cast(x: T) -> U; #[rustc_nounwind] pub unsafe fn simd_as(x: T) -> U; +/// Replacement for `transmute`, specifically for use with scalable SIMD types. +#[rustc_intrinsic] +#[rustc_nounwind] +pub unsafe fn simd_reinterpret(src: Src) -> Dst; + /// Negates a vector elementwise. /// /// `T` must be a vector of integers or floats. From dace4f8b543856dbc39a16f7b56ecf5606a615f3 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 10 Jul 2025 10:17:44 +0000 Subject: [PATCH 04/27] codegen: implement `repr(scalable)` Introduces `BackendRepr::ScalableVector` corresponding to scalable vector types annotated with `repr(scalable)` which lowers to a scalable vector type in LLVM. Co-authored-by: Jamie Cunliffe --- compiler/rustc_abi/src/callconv.rs | 2 + compiler/rustc_abi/src/layout.rs | 134 ++++++++++++------ compiler/rustc_abi/src/lib.rs | 36 ++++- compiler/rustc_codegen_gcc/src/builder.rs | 4 + .../rustc_codegen_gcc/src/intrinsic/mod.rs | 2 +- compiler/rustc_codegen_gcc/src/type_of.rs | 6 +- compiler/rustc_codegen_llvm/src/builder.rs | 19 +++ compiler/rustc_codegen_llvm/src/intrinsic.rs | 38 ++++- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 1 + compiler/rustc_codegen_llvm/src/type_.rs | 4 + compiler/rustc_codegen_llvm/src/type_of.rs | 14 +- compiler/rustc_codegen_llvm/src/va_arg.rs | 6 +- compiler/rustc_codegen_ssa/messages.ftl | 1 + compiler/rustc_codegen_ssa/src/errors.rs | 8 ++ compiler/rustc_codegen_ssa/src/mir/operand.rs | 8 +- compiler/rustc_codegen_ssa/src/mir/place.rs | 18 ++- .../rustc_codegen_ssa/src/traits/builder.rs | 1 + .../src/interpret/validity.rs | 2 +- .../src/util/check_validity_requirement.rs | 2 + compiler/rustc_middle/src/ty/sty.rs | 15 +- compiler/rustc_mir_transform/src/gvn.rs | 4 +- compiler/rustc_public/src/abi.rs | 11 +- .../src/unstable/convert/stable/abi.rs | 3 + .../rustc_target/src/callconv/loongarch.rs | 5 +- compiler/rustc_target/src/callconv/mod.rs | 1 + compiler/rustc_target/src/callconv/riscv.rs | 4 +- compiler/rustc_target/src/callconv/x86.rs | 3 + compiler/rustc_target/src/callconv/x86_64.rs | 2 + .../rustc_target/src/callconv/x86_win64.rs | 1 + compiler/rustc_ty_utils/src/abi.rs | 4 +- compiler/rustc_ty_utils/src/layout.rs | 36 ++++- .../rustc_ty_utils/src/layout/invariant.rs | 2 +- tests/codegen-llvm/scalable-vectors/simple.rs | 49 +++++++ tests/ui/scalable-vectors/closure-capture.rs | 51 +++++++ .../scalable-vectors/closure-capture.stderr | 8 ++ tests/ui/scalable-vectors/copy-clone.rs | 31 ++++ tests/ui/scalable-vectors/copy-clone.stderr | 17 +++ tests/ui/scalable-vectors/fn-trait.rs | 13 ++ tests/ui/scalable-vectors/fn-trait.stderr | 8 ++ tests/ui/scalable-vectors/value-type.rs | 37 +++++ tests/ui/scalable-vectors/value-type.stderr | 17 +++ 41 files changed, 553 insertions(+), 75 deletions(-) create mode 100644 tests/codegen-llvm/scalable-vectors/simple.rs create mode 100644 tests/ui/scalable-vectors/closure-capture.rs create mode 100644 tests/ui/scalable-vectors/closure-capture.stderr create mode 100644 tests/ui/scalable-vectors/copy-clone.rs create mode 100644 tests/ui/scalable-vectors/copy-clone.stderr create mode 100644 tests/ui/scalable-vectors/fn-trait.rs create mode 100644 
tests/ui/scalable-vectors/fn-trait.stderr create mode 100644 tests/ui/scalable-vectors/value-type.rs create mode 100644 tests/ui/scalable-vectors/value-type.stderr diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs index a21e1aee9b08a..360f5689cee40 100644 --- a/compiler/rustc_abi/src/callconv.rs +++ b/compiler/rustc_abi/src/callconv.rs @@ -82,6 +82,8 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { })) } + BackendRepr::ScalableVector { .. } => Err(Heterogeneous), + BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => { // Helper for computing `homogeneous_aggregate`, allowing a custom // starting offset (used below for handling variants). diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index c2405553756b9..cd14156d37367 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -11,7 +11,7 @@ use tracing::{debug, trace}; use crate::{ AbiAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer, LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, - Variants, WrappingRange, + TargetDataLayout, Variants, WrappingRange, }; mod coroutine; @@ -143,58 +143,32 @@ impl LayoutCalculator { }) } - pub fn simd_type< + pub fn scalable_vector_type( + &self, + element: F, + count: u64, + ) -> LayoutCalculatorResult + where FieldIdx: Idx, VariantIdx: Idx, F: AsRef> + fmt::Debug, - >( + { + vector_type_layout(VectorKind::Scalable, self.cx.data_layout(), element, count) + } + + pub fn simd_type( &self, element: F, count: u64, repr_packed: bool, - ) -> LayoutCalculatorResult { - let elt = element.as_ref(); - if count == 0 { - return Err(LayoutCalculatorError::ZeroLengthSimdType); - } else if count > crate::MAX_SIMD_LANES { - return Err(LayoutCalculatorError::OversizedSimdType { - max_lanes: crate::MAX_SIMD_LANES, - }); - } - - let BackendRepr::Scalar(e_repr) = elt.backend_repr else { - return Err(LayoutCalculatorError::NonPrimitiveSimdType(element)); - }; - - // Compute the size and alignment of the vector - let dl = self.cx.data_layout(); - let size = - elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?; - let (repr, align) = if repr_packed && !count.is_power_of_two() { - // Non-power-of-two vectors have padding up to the next power-of-two. - // If we're a packed repr, remove the padding while keeping the alignment as close - // to a vector as possible. - (BackendRepr::Memory { sized: true }, AbiAlign { abi: Align::max_aligned_factor(size) }) - } else { - (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size)) - }; - let size = size.align_to(align.abi); - - Ok(LayoutData { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Arbitrary { - offsets: [Size::ZERO].into(), - memory_index: [0].into(), - }, - backend_repr: repr, - largest_niche: elt.largest_niche, - uninhabited: false, - size, - align, - max_repr_align: None, - unadjusted_abi_align: elt.align.abi, - randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)), - }) + ) -> LayoutCalculatorResult + where + FieldIdx: Idx, + VariantIdx: Idx, + F: AsRef> + fmt::Debug, + { + let kind = if repr_packed { VectorKind::PackedFixed } else { VectorKind::Fixed }; + vector_type_layout(kind, self.cx.data_layout(), element, count) } /// Compute the layout for a coroutine. @@ -455,6 +429,7 @@ impl LayoutCalculator { BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) 
| BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } | BackendRepr::Memory { .. } => repr, }, }; @@ -526,7 +501,8 @@ impl LayoutCalculator { hide_niches(a); hide_niches(b); } - BackendRepr::SimdVector { element, count: _ } => hide_niches(element), + BackendRepr::SimdVector { element, .. } + | BackendRepr::ScalableVector { element, .. } => hide_niches(element), BackendRepr::Memory { sized: _ } => {} } st.largest_niche = None; @@ -1525,3 +1501,67 @@ impl LayoutCalculator { s } } + +enum VectorKind { + /// `#[rustc_scalable_vector]` + Scalable, + /// `#[repr(simd, packed)]` + PackedFixed, + /// `#[repr(simd)]` + Fixed, +} + +fn vector_type_layout( + kind: VectorKind, + dl: &TargetDataLayout, + element: F, + count: u64, +) -> LayoutCalculatorResult +where + FieldIdx: Idx, + VariantIdx: Idx, + F: AsRef> + fmt::Debug, +{ + let elt = element.as_ref(); + if count == 0 { + return Err(LayoutCalculatorError::ZeroLengthSimdType); + } else if count > crate::MAX_SIMD_LANES { + return Err(LayoutCalculatorError::OversizedSimdType { max_lanes: crate::MAX_SIMD_LANES }); + } + + let BackendRepr::Scalar(element) = elt.backend_repr else { + return Err(LayoutCalculatorError::NonPrimitiveSimdType(element)); + }; + + // Compute the size and alignment of the vector + let size = + elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?; + let (repr, align) = match kind { + VectorKind::Scalable => { + (BackendRepr::ScalableVector { element, count }, dl.llvmlike_vector_align(size)) + } + // Non-power-of-two vectors have padding up to the next power-of-two. + // If we're a packed repr, remove the padding while keeping the alignment as close + // to a vector as possible. + VectorKind::PackedFixed if !count.is_power_of_two() => { + (BackendRepr::Memory { sized: true }, AbiAlign { abi: Align::max_aligned_factor(size) }) + } + VectorKind::PackedFixed | VectorKind::Fixed => { + (BackendRepr::SimdVector { element, count }, dl.llvmlike_vector_align(size)) + } + }; + let size = size.align_to(align.abi); + + Ok(LayoutData { + variants: Variants::Single { index: VariantIdx::new(0) }, + fields: FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() }, + backend_repr: repr, + largest_niche: elt.largest_niche, + uninhabited: false, + size, + align, + max_repr_align: None, + unadjusted_abi_align: elt.align.abi, + randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)), + }) +} diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index b7262fd8a01f3..1de0f3627e033 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -1753,6 +1753,10 @@ impl AddressSpace { pub enum BackendRepr { Scalar(Scalar), ScalarPair(Scalar, Scalar), + ScalableVector { + element: Scalar, + count: u64, + }, SimdVector { element: Scalar, count: u64, @@ -1771,6 +1775,9 @@ impl BackendRepr { match *self { BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) + // FIXME(repr_scalable): Scalable vectors are `Sized` while the `sized_hierarchy` + // feature is not yet fully implemented + | BackendRepr::ScalableVector { .. } | BackendRepr::SimdVector { .. } => false, BackendRepr::Memory { sized } => !sized, } @@ -1811,7 +1818,9 @@ impl BackendRepr { BackendRepr::Scalar(s) => Some(s.align(cx).abi), BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi), // The align of a Vector can vary in surprising ways - BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None, + BackendRepr::SimdVector { .. 
} + | BackendRepr::Memory { .. } + | BackendRepr::ScalableVector { .. } => None, } } @@ -1833,7 +1842,9 @@ impl BackendRepr { Some(size) } // The size of a Vector can vary in surprising ways - BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None, + BackendRepr::SimdVector { .. } + | BackendRepr::Memory { .. } + | BackendRepr::ScalableVector { .. } => None, } } @@ -1848,6 +1859,9 @@ impl BackendRepr { BackendRepr::SimdVector { element: element.to_union(), count } } BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true }, + BackendRepr::ScalableVector { element, count } => { + BackendRepr::ScalableVector { element: element.to_union(), count } + } } } @@ -2088,7 +2102,9 @@ impl LayoutData { /// Returns `true` if this is an aggregate type (including a ScalarPair!) pub fn is_aggregate(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false, + BackendRepr::Scalar(_) + | BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } => false, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true, } } @@ -2182,6 +2198,19 @@ impl LayoutData { self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1 } + /// Returns `true` if the size of the type is only known at runtime. + pub fn is_runtime_sized(&self) -> bool { + matches!(self.backend_repr, BackendRepr::ScalableVector { .. }) + } + + /// Returns the elements count of a scalable vector. + pub fn scalable_vector_element_count(&self) -> Option { + match self.backend_repr { + BackendRepr::ScalableVector { count, .. } => Some(count), + _ => None, + } + } + /// Returns `true` if the type is a ZST and not unsized. /// /// Note that this does *not* imply that the type is irrelevant for layout! It can still have @@ -2190,6 +2219,7 @@ impl LayoutData { match self.backend_repr { BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) + | BackendRepr::ScalableVector { .. } | BackendRepr::SimdVector { .. } => false, BackendRepr::Memory { sized } => sized && self.size.bytes() == 0, } diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index f7a7a3f8c7e35..ebbd3af9bf21b 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -933,6 +933,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { .get_address(self.location) } + fn scalable_alloca(&mut self, _elt: u64, _align: Align, _element_ty: Ty<'_>) -> RValue<'gcc> { + todo!() + } + fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { let block = self.llbb(); let function = block.get_function(); diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index eb0a5336a1f13..c3013396fcf87 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -536,7 +536,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc let layout = self.layout_of(tp_ty).layout; let _use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, - SimdVector { .. } => false, + SimdVector { .. } | ScalableVector { .. } => false, Memory { .. 
} => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 093f902bc3d86..aeb0b7c08cedd 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -85,6 +85,7 @@ fn uncached_gcc_type<'gcc, 'tcx>( ); } BackendRepr::Memory { .. } => {} + BackendRepr::ScalableVector { .. } => todo!(), } let name = match *layout.ty.kind() { @@ -178,7 +179,9 @@ pub trait LayoutGccExt<'tcx> { impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { fn is_gcc_immediate(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true, + BackendRepr::Scalar(_) + | BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } => true, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false, } } @@ -188,6 +191,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { BackendRepr::ScalarPair(..) => true, BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } | BackendRepr::Memory { .. } => false, } } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 917d07e3c61bf..d1ac19635d87b 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -617,6 +617,25 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } + fn scalable_alloca(&mut self, elt: u64, align: Align, element_ty: Ty<'_>) -> Self::Value { + let mut bx = Builder::with_cx(self.cx); + bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); + let llvm_ty = match element_ty.kind() { + ty::Bool => bx.type_i1(), + ty::Int(int_ty) => self.cx.type_int_from_ty(*int_ty), + ty::Uint(uint_ty) => self.cx.type_uint_from_ty(*uint_ty), + ty::Float(float_ty) => self.cx.type_float_from_ty(*float_ty), + _ => unreachable!("scalable vectors can only contain a bool, int, uint or float"), + }; + + unsafe { + let ty = llvm::LLVMScalableVectorType(llvm_ty, elt.try_into().unwrap()); + let alloca = llvm::LLVMBuildAlloca(&bx.llbuilder, ty, UNNAMED); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca + } + } + fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value { unsafe { let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED); diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index fdf1f2af80889..29493adae3f2c 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -444,6 +444,14 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { let use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, SimdVector { .. } => false, + ScalableVector { .. } => { + tcx.dcx().emit_err(InvalidMonomorphization::NonScalableType { + span, + name: sym::raw_eq, + ty: tp_ty, + }); + return Ok(()); + } Memory { .. 
} => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), @@ -1244,7 +1252,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>( bx.bitcast(val, llret_ty) } OperandValue::ZeroSized => bx.const_undef(llret_ty), - OperandValue::Pair(_, _) => todo!(), + OperandValue::Pair(_, _) => { + return_error!(InvalidMonomorphization::NonScalableType { span, name, ty: ret_ty }) + } }); } @@ -1443,11 +1453,27 @@ fn generic_simd_intrinsic<'ll, 'tcx>( m_len == v_len, InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len } ); - let in_elem_bitwidth = require_int_or_uint_ty!( - m_elem_ty.kind(), - InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty } - ); - let m_i1s = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len); + + let m_i1s = if args[1].layout.ty.is_scalable_vector() { + match m_elem_ty.kind() { + ty::Bool => {} + _ => return_error!(InvalidMonomorphization::MaskWrongElementType { + span, + name, + ty: m_elem_ty + }), + }; + let i1 = bx.type_i1(); + let i1xn = bx.type_scalable_vector(i1, m_len as u64); + bx.trunc(args[0].immediate(), i1xn) + } else { + let in_elem_bitwidth = require_int_or_uint_ty!( + m_elem_ty.kind(), + InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty } + ); + vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len) + }; + return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 75d3d27f74e10..e3e0830f13e8b 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1082,6 +1082,7 @@ unsafe extern "C" { // Operations on array, pointer, and vector types (sequence types) pub(crate) fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type; pub(crate) fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; + pub(crate) fn LLVMScalableVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; pub(crate) fn LLVMGetElementType(Ty: &Type) -> &Type; pub(crate) fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint; diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 893655031388c..8ad1a7dc3e0dd 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -68,6 +68,10 @@ impl<'ll, CX: Borrow>> GenericCx<'ll, CX> { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } + pub(crate) fn type_scalable_vector(&self, ty: &'ll Type, count: u64) -> &'ll Type { + unsafe { llvm::LLVMScalableVectorType(ty, count as c_uint) } + } + pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { let n_args = llvm::LLVMCountParamTypes(ty) as usize; diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 4e7096da502d0..1ae926ed9ee1b 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -23,6 +23,15 @@ fn uncached_llvm_type<'a, 'tcx>( let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } + BackendRepr::ScalableVector { ref element, count } => { + let element = if element.is_bool() { + cx.type_i1() + } else { + layout.scalar_llvm_type_at(cx, *element) + }; + + return cx.type_scalable_vector(element, count); + } BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) 
=> {} } @@ -171,7 +180,9 @@ pub(crate) trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true, + BackendRepr::Scalar(_) + | BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } => true, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false, } } @@ -181,6 +192,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { BackendRepr::ScalarPair(..) => true, BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } | BackendRepr::Memory { .. } => false, } } diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index ce079f3cb0af1..74787d20dfcef 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -546,7 +546,7 @@ fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>( registers_for_primitive(scalar1.primitive()); registers_for_primitive(scalar2.primitive()); } - BackendRepr::SimdVector { .. } => { + BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => { // Because no instance of VaArgSafe uses a non-scalar `BackendRepr`. unreachable!( "No x86-64 SysV va_arg implementation for {:?}", @@ -686,7 +686,9 @@ fn emit_x86_64_sysv64_va_arg<'ll, 'tcx>( } } // The Previous match on `BackendRepr` means control flow already escaped. - BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => unreachable!(), + BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } + | BackendRepr::Memory { .. } => unreachable!(), }; // AMD64-ABI 3.5.7p5: Step 5. Set: diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl index 3ca070acc9de5..0079e2209dfcb 100644 --- a/compiler/rustc_codegen_ssa/messages.ftl +++ b/compiler/rustc_codegen_ssa/messages.ftl @@ -131,6 +131,7 @@ codegen_ssa_invalid_monomorphization_mask_wrong_element_type = invalid monomorph codegen_ssa_invalid_monomorphization_mismatched_lengths = invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}` +codegen_ssa_invalid_monomorphization_non_scalable_type = invalid monomorphization of `{$name}` intrinsic: expected non-scalable type, found scalable type `{$ty}` codegen_ssa_invalid_monomorphization_return_element = invalid monomorphization of `{$name}` intrinsic: expected return element type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` with element type `{$out_ty}` codegen_ssa_invalid_monomorphization_return_integer_type = invalid monomorphization of `{$name}` intrinsic: expected return type with integer elements, found `{$ret_ty}` with non-integer `{$out_ty}` diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index 7ac830bcda919..caab9600d02e9 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -1104,6 +1104,14 @@ pub enum InvalidMonomorphization<'tcx> { expected_element: Ty<'tcx>, vector_type: Ty<'tcx>, }, + + #[diag(codegen_ssa_invalid_monomorphization_non_scalable_type, code = E0511)] + NonScalableType { + #[primary_span] + span: Span, + name: Symbol, + ty: Ty<'tcx>, + }, } pub enum ExpectedPointerMutability { diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index d851c3329802c..d03cc33ee9ebb 100644 --- 
a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -377,7 +377,9 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { imm } } - BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(), + BackendRepr::ScalarPair(_, _) + | BackendRepr::Memory { .. } + | BackendRepr::ScalableVector { .. } => bug!(), }) }; @@ -664,7 +666,9 @@ impl<'a, 'tcx, V: CodegenObject> OperandRefBuilder<'tcx, V> { BackendRepr::ScalarPair(a, b) => { OperandValueBuilder::Pair(Either::Right(a), Either::Right(b)) } - BackendRepr::SimdVector { .. } => OperandValueBuilder::Vector(Either::Right(())), + BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => { + OperandValueBuilder::Vector(Either::Right(())) + } BackendRepr::Memory { .. } => { bug!("Cannot use non-ZST Memory-ABI type in operand builder: {layout:?}"); } diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index 0090be9fdef06..8f42b835759e0 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -109,7 +109,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, layout: TyAndLayout<'tcx>, ) -> Self { - Self::alloca_size(bx, layout.size, layout) + if layout.is_runtime_sized() { + Self::alloca_runtime_sized(bx, layout) + } else { + Self::alloca_size(bx, layout.size, layout) + } } pub fn alloca_size>( @@ -146,6 +150,18 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) } } + + fn alloca_runtime_sized>( + bx: &mut Bx, + layout: TyAndLayout<'tcx>, + ) -> Self { + let (element_count, ty) = layout.ty.scalable_vector_element_count_and_type(bx.tcx()); + PlaceValue::new_sized( + bx.scalable_alloca(element_count as u64, layout.align.abi, ty), + layout.align.abi, + ) + .with_type(layout) + } } impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index f417d1a7bf724..4e6e5014114a8 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -234,6 +234,7 @@ pub trait BuilderMethods<'a, 'tcx>: fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value; fn alloca(&mut self, size: Size, align: Align) -> Self::Value; + fn scalable_alloca(&mut self, elt: u64, align: Align, element_ty: Ty<'_>) -> Self::Value; fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value; fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value; diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index ed48f53c31056..f482e14a48d8f 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -1318,7 +1318,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, self.visit_scalar(b, b_layout)?; } } - BackendRepr::SimdVector { .. } => { + BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => { // No checks here, we assume layout computation gets this right. // (This is harder to check since Miri does not represent these as `Immediate`. We // also cannot use field projections since this might be a newtype around a vector.) 
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs index b1f2959875051..c644039205084 100644 --- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs +++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs @@ -119,7 +119,9 @@ fn check_validity_requirement_lax<'tcx>( } BackendRepr::SimdVector { element: s, count } => count == 0 || scalar_allows_raw_init(s), BackendRepr::Memory { .. } => true, // Fields are checked below. + BackendRepr::ScalableVector { element, .. } => scalar_allows_raw_init(element), }; + if !valid { // This is definitely not okay. return Ok(false); diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index d46e0b60155ab..3568cad6144b6 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -7,7 +7,7 @@ use std::borrow::Cow; use std::ops::{ControlFlow, Range}; use hir::def::{CtorKind, DefKind}; -use rustc_abi::{FIRST_VARIANT, FieldIdx, VariantIdx}; +use rustc_abi::{FIRST_VARIANT, FieldIdx, ScalableElt, VariantIdx}; use rustc_errors::{ErrorGuaranteed, MultiSpan}; use rustc_hir as hir; use rustc_hir::LangItem; @@ -1212,6 +1212,19 @@ impl<'tcx> Ty<'tcx> { } } + pub fn scalable_vector_element_count_and_type(self, tcx: TyCtxt<'tcx>) -> (u128, Ty<'tcx>) { + let Adt(def, args) = self.kind() else { + bug!("`scalable_vector_size_and_type` called on invalid type") + }; + let Some(ScalableElt::ElementCount(element_count)) = def.repr().scalable else { + bug!("`scalable_vector_size_and_type` called on non-scalable vector type"); + }; + let variant = def.non_enum_variant(); + assert_eq!(variant.fields.len(), 1); + let field_ty = variant.fields[FieldIdx::ZERO].ty(tcx, args); + (element_count, field_ty) + } + pub fn simd_size_and_type(self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) { let Adt(def, args) = self.kind() else { bug!("`simd_size_and_type` called on invalid type") diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 952da2cdf7253..3c4ae4c515aa1 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -1519,7 +1519,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { BackendRepr::ScalarPair(a, b) => { !a.is_always_valid(&self.ecx) || !b.is_always_valid(&self.ecx) } - BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => false, + BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } + | BackendRepr::Memory { .. } => false, } } diff --git a/compiler/rustc_public/src/abi.rs b/compiler/rustc_public/src/abi.rs index 7b0882caf1b3e..3ac57818ecbfb 100644 --- a/compiler/rustc_public/src/abi.rs +++ b/compiler/rustc_public/src/abi.rs @@ -233,6 +233,10 @@ pub enum ValueAbi { element: Scalar, count: u64, }, + ScalableVector { + element: Scalar, + count: u64, + }, Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. sized: bool, @@ -243,7 +247,12 @@ impl ValueAbi { /// Returns `true` if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { match *self { - ValueAbi::Scalar(_) | ValueAbi::ScalarPair(..) | ValueAbi::Vector { .. } => false, + ValueAbi::Scalar(_) + | ValueAbi::ScalarPair(..) + | ValueAbi::Vector { .. } + // FIXME(repr_scalable): Scalable vectors are `Sized` while the `sized_hierarchy` + // feature is not yet fully implemented + | ValueAbi::ScalableVector { .. 
} => false, ValueAbi::Aggregate { sized } => !sized, } } diff --git a/compiler/rustc_public/src/unstable/convert/stable/abi.rs b/compiler/rustc_public/src/unstable/convert/stable/abi.rs index 782e75a930e07..03328d084ee94 100644 --- a/compiler/rustc_public/src/unstable/convert/stable/abi.rs +++ b/compiler/rustc_public/src/unstable/convert/stable/abi.rs @@ -256,6 +256,9 @@ impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr { rustc_abi::BackendRepr::SimdVector { element, count } => { ValueAbi::Vector { element: element.stable(tables, cx), count } } + rustc_abi::BackendRepr::ScalableVector { element, count } => { + ValueAbi::ScalableVector { element: element.stable(tables, cx), count } + } rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized }, } } diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs index d567ad401bb1f..34e53c69f3b69 100644 --- a/compiler/rustc_target/src/callconv/loongarch.rs +++ b/compiler/rustc_target/src/callconv/loongarch.rs @@ -80,7 +80,10 @@ where } } }, - BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv), + BackendRepr::SimdVector { .. } => { + return Err(CannotUseFpConv); + } + BackendRepr::ScalableVector { .. } => unreachable!(), BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields { FieldsShape::Primitive => { unreachable!("aggregates can't have `FieldsShape::Primitive`") diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 63e56744aec96..58e49a507cb2e 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -391,6 +391,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> { ), BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()), BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout), + BackendRepr::ScalableVector { .. } => PassMode::Direct(ArgAttributes::new()), }; ArgAbi { layout, mode } } diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs index 161e2c1645f9a..3f6d08aee5528 100644 --- a/compiler/rustc_target/src/callconv/riscv.rs +++ b/compiler/rustc_target/src/callconv/riscv.rs @@ -91,7 +91,9 @@ where } } }, - BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv), + BackendRepr::SimdVector { .. } | BackendRepr::ScalableVector { .. } => { + return Err(CannotUseFpConv); + } BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields { FieldsShape::Primitive => { unreachable!("aggregates can't have `FieldsShape::Primitive`") diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs index 918b71c80c4f0..ce173d84c8d56 100644 --- a/compiler/rustc_target/src/callconv/x86.rs +++ b/compiler/rustc_target/src/callconv/x86.rs @@ -98,6 +98,9 @@ where } false } + BackendRepr::ScalableVector { .. } => { + unreachable!("scalable vectors are unsupported") + } } } diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs index d8db7ed6e4c0f..494c590dc63d3 100644 --- a/compiler/rustc_target/src/callconv/x86_64.rs +++ b/compiler/rustc_target/src/callconv/x86_64.rs @@ -59,6 +59,8 @@ where BackendRepr::SimdVector { .. } => Class::Sse, + BackendRepr::ScalableVector { .. } => panic!("scalable vectors are unsupported"), + BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. 
} => { for i in 0..layout.fields.count() { let field_off = off + layout.fields.offset(i); diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs index 8f8597ea662a8..828eef7d04157 100644 --- a/compiler/rustc_target/src/callconv/x86_win64.rs +++ b/compiler/rustc_target/src/callconv/x86_win64.rs @@ -22,6 +22,7 @@ pub(crate) fn compute_abi_info(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<' // FIXME(eddyb) there should be a size cap here // (probably what clang calls "illegal vectors"). } + BackendRepr::ScalableVector { .. } => unreachable!("scalable vectors are unsupported"), BackendRepr::Scalar(scalar) => { if is_ret && matches!(scalar.primitive(), Primitive::Int(Integer::I128, _)) { if cx.target_spec().rustc_abi == Some(RustcAbi::X86Softfloat) { diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index 89d1dd8cf231b..56a6664eaa5bf 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -398,7 +398,9 @@ fn fn_abi_sanity_check<'tcx>( // `layout.backend_repr` and ignore everything else. We should just reject //`Aggregate` entirely here, but some targets need to be fixed first. match arg.layout.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => {} + BackendRepr::Scalar(_) + | BackendRepr::SimdVector { .. } + | BackendRepr::ScalableVector { .. } => {} BackendRepr::ScalarPair(..) => { panic!("`PassMode::Direct` used for ScalarPair type {}", arg.layout.ty) } diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index 79f7e228e2adc..7e11bb5485ab3 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -3,8 +3,8 @@ use rustc_abi::Integer::{I8, I32}; use rustc_abi::Primitive::{self, Float, Int, Pointer}; use rustc_abi::{ AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout, - LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, - VariantIdx, Variants, WrappingRange, + LayoutCalculatorError, LayoutData, Niche, ReprOptions, ScalableElt, Scalar, Size, StructKind, + TagEncoding, VariantIdx, Variants, WrappingRange, }; use rustc_hashes::Hash64; use rustc_index::IndexVec; @@ -537,6 +537,37 @@ fn layout_of_uncached<'tcx>( univariant(tys, kind)? } + // Scalable vector types + // + // ```rust (ignore, example) + // #[rustc_scalable_vector(3)] + // struct svuint32_t(u32); + // ``` + ty::Adt(def, args) + if matches!(def.repr().scalable, Some(ScalableElt::ElementCount(..))) => + { + let Some(element_ty) = def + .is_struct() + .then(|| &def.variant(FIRST_VARIANT).fields) + .filter(|fields| fields.len() == 1) + .map(|fields| fields[FieldIdx::ZERO].ty(tcx, args)) + else { + let guar = tcx + .dcx() + .delayed_bug("#[rustc_scalable_vector] was applied to an invalid type"); + return Err(error(cx, LayoutError::ReferencesError(guar))); + }; + let Some(ScalableElt::ElementCount(element_count)) = def.repr().scalable else { + let guar = tcx + .dcx() + .delayed_bug("#[rustc_scalable_vector] was applied to an invalid type"); + return Err(error(cx, LayoutError::ReferencesError(guar))); + }; + + let element_layout = cx.layout_of(element_ty)?; + map_layout(cx.calc.scalable_vector_type(element_layout, element_count as u64))? + } + // SIMD vector types. 
ty::Adt(def, args) if def.repr().simd() => { // Supported SIMD vectors are ADTs with a single array field: @@ -560,7 +591,6 @@ fn layout_of_uncached<'tcx>( .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; let e_ly = cx.layout_of(e_ty)?; - map_layout(cx.calc.simd_type(e_ly, e_len, def.repr().packed()))? } diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs index 1311ee31182c6..85dbe3273594c 100644 --- a/compiler/rustc_ty_utils/src/layout/invariant.rs +++ b/compiler/rustc_ty_utils/src/layout/invariant.rs @@ -248,7 +248,7 @@ pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayou // And the size has to be element * count plus alignment padding, of course assert!(size == (element_size * count).align_to(align)); } - BackendRepr::Memory { .. } => {} // Nothing to check. + BackendRepr::Memory { .. } | BackendRepr::ScalableVector { .. } => {} // Nothing to check. } } diff --git a/tests/codegen-llvm/scalable-vectors/simple.rs b/tests/codegen-llvm/scalable-vectors/simple.rs new file mode 100644 index 0000000000000..9706b4acab340 --- /dev/null +++ b/tests/codegen-llvm/scalable-vectors/simple.rs @@ -0,0 +1,49 @@ +//@ edition: 2021 +//@ only-aarch64 +#![crate_type = "lib"] +#![allow(incomplete_features, internal_features)] +#![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics)] + +#[derive(Copy, Clone)] +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +pub struct svint32_t(i32); + +#[inline(never)] +#[target_feature(enable = "sve")] +pub unsafe fn svdup_n_s32(op: i32) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} + +#[inline] +#[target_feature(enable = "sve,sve2")] +pub unsafe fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")] + fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svxar_n_s32(op1, op2, IMM3) } +} + +#[inline(never)] +#[no_mangle] +#[target_feature(enable = "sve,sve2")] +// CHECK: define @pass_as_ref(ptr noalias noundef readonly align 16 captures(none) dereferenceable(16) %a, %b) +pub unsafe fn pass_as_ref(a: &svint32_t, b: svint32_t) -> svint32_t { + // CHECK: load , ptr %a, align 16 + svxar_n_s32::<1>(*a, b) +} + +#[no_mangle] +#[target_feature(enable = "sve,sve2")] +// CHECK: define @test() +pub unsafe fn test() -> svint32_t { + let a = svdup_n_s32(1); + let b = svdup_n_s32(2); + // CHECK: %_0 = call @pass_as_ref(ptr noalias noundef nonnull readonly align 16 dereferenceable(16) %a, %b) + pass_as_ref(&a, b) +} diff --git a/tests/ui/scalable-vectors/closure-capture.rs b/tests/ui/scalable-vectors/closure-capture.rs new file mode 100644 index 0000000000000..d6a45f76e214d --- /dev/null +++ b/tests/ui/scalable-vectors/closure-capture.rs @@ -0,0 +1,51 @@ +//@ compile-flags: --crate-type=lib +//@ only-aarch64 + +#![allow(incomplete_features, internal_features)] +#![feature( + link_llvm_intrinsics, + rustc_attrs, + simd_ffi +)] + +#[derive(Copy, Clone)] +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +pub struct svint32_t(i32); + +#[inline(never)] +#[target_feature(enable = "sve")] +pub unsafe fn svdup_n_s32(op: i32) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { 
_svdup_n_s32(op) } +} + +#[inline] +#[target_feature(enable = "sve,sve2")] +pub unsafe fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")] + fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svxar_n_s32(op1, op2, IMM3) } +} + +#[inline(never)] +#[target_feature(enable = "sve,sve2")] +fn run(f: impl Fn() -> ()) { + f(); +} + +#[target_feature(enable = "sve,sve2")] +fn foo() { + unsafe { + let a = svdup_n_s32(42); + run(move || { +//~^ ERROR: scalable vectors cannot be tuple fields + svxar_n_s32::<2>(a, a); + }); + } +} diff --git a/tests/ui/scalable-vectors/closure-capture.stderr b/tests/ui/scalable-vectors/closure-capture.stderr new file mode 100644 index 0000000000000..ea53066988e56 --- /dev/null +++ b/tests/ui/scalable-vectors/closure-capture.stderr @@ -0,0 +1,8 @@ +error: scalable vectors cannot be tuple fields + --> $DIR/closure-capture.rs:46:9 + | +LL | run(move || { + | ^^^ + +error: aborting due to 1 previous error + diff --git a/tests/ui/scalable-vectors/copy-clone.rs b/tests/ui/scalable-vectors/copy-clone.rs new file mode 100644 index 0000000000000..7576b6ec18dcb --- /dev/null +++ b/tests/ui/scalable-vectors/copy-clone.rs @@ -0,0 +1,31 @@ +//@ build-pass +//@ only-aarch64 +#![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics)] + +#[derive(Copy, Clone)] +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +pub struct svint32_t(i32); + +#[target_feature(enable = "sve")] +pub unsafe fn svdup_n_s32(op: i32) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; +//~^ WARN: `extern` block uses type `svint32_t`, which is not FFI-safe + } + unsafe { _svdup_n_s32(op) } +} + +#[target_feature(enable = "sve")] +fn require_copy(t: T) {} + +#[target_feature(enable = "sve")] +fn test() { + unsafe { + let a = svdup_n_s32(1); + require_copy(a); + } +} + +fn main() {} diff --git a/tests/ui/scalable-vectors/copy-clone.stderr b/tests/ui/scalable-vectors/copy-clone.stderr new file mode 100644 index 0000000000000..8b07aba8e1c07 --- /dev/null +++ b/tests/ui/scalable-vectors/copy-clone.stderr @@ -0,0 +1,17 @@ +warning: `extern` block uses type `svint32_t`, which is not FFI-safe + --> $DIR/copy-clone.rs:14:37 + | +LL | fn _svdup_n_s32(op: i32) -> svint32_t; + | ^^^^^^^^^ not FFI-safe + | + = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct + = note: this struct has unspecified layout +note: the type is defined here + --> $DIR/copy-clone.rs:8:1 + | +LL | pub struct svint32_t(i32); + | ^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(improper_ctypes)]` on by default + +warning: 1 warning emitted + diff --git a/tests/ui/scalable-vectors/fn-trait.rs b/tests/ui/scalable-vectors/fn-trait.rs new file mode 100644 index 0000000000000..5203b5fa0efd7 --- /dev/null +++ b/tests/ui/scalable-vectors/fn-trait.rs @@ -0,0 +1,13 @@ +#![allow(internal_features)] +#![feature(rustc_attrs)] + +#[rustc_scalable_vector(4)] +pub struct ScalableSimdFloat(f32); + +unsafe fn test(f: T) +where + T: Fn(ScalableSimdFloat), //~ ERROR: scalable vectors cannot be tuple fields +{ +} + +fn main() {} diff --git a/tests/ui/scalable-vectors/fn-trait.stderr b/tests/ui/scalable-vectors/fn-trait.stderr new file mode 100644 index 0000000000000..4d00272dd1b5a --- /dev/null +++ b/tests/ui/scalable-vectors/fn-trait.stderr @@ -0,0 +1,8 @@ +error: scalable vectors 
cannot be tuple fields + --> $DIR/fn-trait.rs:9:8 + | +LL | T: Fn(ScalableSimdFloat), + | ^^^^^^^^^^^^^^^^^^^^^ + +error: aborting due to 1 previous error + diff --git a/tests/ui/scalable-vectors/value-type.rs b/tests/ui/scalable-vectors/value-type.rs new file mode 100644 index 0000000000000..31a9ee8344eb5 --- /dev/null +++ b/tests/ui/scalable-vectors/value-type.rs @@ -0,0 +1,37 @@ +//@ build-pass +//@ compile-flags: --crate-type=lib +//@ only-aarch64 +#![allow(internal_features)] +#![feature( + link_llvm_intrinsics, + rustc_attrs, + simd_ffi, +)] + +#[derive(Copy, Clone)] +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +pub struct svint32_t(i32); + +#[target_feature(enable = "sve")] +pub unsafe fn svdup_n_s32(op: i32) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; +//~^ WARN: `extern` block uses type `svint32_t`, which is not FFI-safe + } + unsafe { _svdup_n_s32(op) } +} + +// Tests that scalable vectors can be locals, arguments and return types. + +#[target_feature(enable = "sve")] +fn id(v: svint32_t) -> svint32_t { v } + +#[target_feature(enable = "sve")] +fn foo() { + unsafe { + let v = svdup_n_s32(1); + let v = id(v); + } +} diff --git a/tests/ui/scalable-vectors/value-type.stderr b/tests/ui/scalable-vectors/value-type.stderr new file mode 100644 index 0000000000000..3fc90ebd874e6 --- /dev/null +++ b/tests/ui/scalable-vectors/value-type.stderr @@ -0,0 +1,17 @@ +warning: `extern` block uses type `svint32_t`, which is not FFI-safe + --> $DIR/value-type.rs:20:37 + | +LL | fn _svdup_n_s32(op: i32) -> svint32_t; + | ^^^^^^^^^ not FFI-safe + | + = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct + = note: this struct has unspecified layout +note: the type is defined here + --> $DIR/value-type.rs:14:1 + | +LL | pub struct svint32_t(i32); + | ^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(improper_ctypes)]` on by default + +warning: 1 warning emitted + From 1edd4a705b382748fd446df450fb4bbf3c351394 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 10 Jul 2025 10:17:53 +0000 Subject: [PATCH 05/27] debuginfo: no spill `` for `N!=16` LLVM doesn't handle stores on `` for `N != 16`, a type used internally in SVE intrinsics. Spilling to the stack to create debuginfo will cause errors during instruction selection. These types that are an internal implementation detail to the intrinsic, so users should never see them types and won't need any debuginfo. 
Co-authored-by: Jamie Cunliffe --- .../rustc_codegen_ssa/src/mir/debuginfo.rs | 45 ++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index b8f635ab78161..5d171396f4b50 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -2,7 +2,7 @@ use std::collections::hash_map::Entry; use std::marker::PhantomData; use std::ops::Range; -use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx}; +use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, ScalableElt, Size, VariantIdx}; use rustc_data_structures::fx::FxHashMap; use rustc_index::IndexVec; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; @@ -361,6 +361,49 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return; } + // Don't spill `` for `N != 16`: + // + // SVE predicates are only one bit for each byte in an SVE vector (which makes + // sense, the predicate only needs to keep track of whether a lane is + // enabled/disabled). i.e. a `` vector has a `` + // predicate type. `` corresponds to two bytes of storage, + // multiplied by the `vscale`, with one bit for each of the sixteen lanes. + // + // For a vector with fewer elements, such as `svint32_t`/``, + // while only a `` predicate type would be strictly necessary, + // relevant intrinsics still take a `svbool_t`/`` - this is + // because a `` is only half of a byte (for `vscale=1`), and with + // memory being byte-addressable, it's unclear how to store that. + // + // Due to this, LLVM ultimately decided not to support stores of `` + // for `N != 16`. As for `vscale=1` and `N` fewer than sixteen, partial bytes would + // need to be stored (except for `N=8`, but that also isn't supported). `N` can + // never be greater than sixteen as that ends up larger than the 128-bit increment + // size. + // + // Internally, with an intrinsic operating on a `svint32_t`/`` + // (for example), the intrinsic takes the `svbool_t`/`` predicate + // and casts it to a `svbool4_t`/``. Therefore, it's important that + // the `` never spills because that'll cause errors during + // instruction selection. Spilling to the stack to create debuginfo for these + // intermediate values must be avoided and won't degrade the debugging experience + // anyway. + if operand.layout.ty.is_scalable_vector() + && bx.sess().target.arch == "aarch64" + && let ty::Adt(adt, args) = &operand.layout.ty.kind() + && let Some(marker_type_field) = + adt.non_enum_variant().fields.get(FieldIdx::from_u32(0)) + { + let marker_type = marker_type_field.ty(bx.tcx(), args); + // i.e. `` when `N != 16` + if let ty::Slice(element_ty) = marker_type.kind() + && element_ty.is_bool() + && adt.repr().scalable != Some(ScalableElt::ElementCount(16)) + { + return; + } + } + Self::spill_operand_to_stack(*operand, name, bx) } From 86e1b7fd4e7bf077fa1796f70bf3b7cc130da1f5 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 10 Jul 2025 06:18:13 +0000 Subject: [PATCH 06/27] mir_transform: prohibit scalable vectors in async Scalable vectors cannot be members of ADTs and thus cannot be kept over await points in async functions. 
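As an illustration only (a conceptual sketch, not code from this patch): the
coroutine lowering stores every local that is live across an `.await` in a
compiler-generated state struct, and a scalable vector is not a legal ADT
field, so such locals have to be rejected up front. The type and field names
below are hypothetical.

```rust
#![feature(rustc_attrs)]
#![allow(internal_features, non_camel_case_types, dead_code)]

#[rustc_scalable_vector(4)]
struct svint32_t(i32);

// Roughly what the transform would have to generate for
// `let x = ...; tick().await; use(x);` -- the scalable vector would become
// a field of the coroutine state, which is exactly what is prohibited.
struct CoroutineState {
    x: svint32_t,
}
```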
--- compiler/rustc_mir_transform/src/coroutine.rs | 5 +++ tests/ui/scalable-vectors/async.rs | 44 +++++++++++++++++++ tests/ui/scalable-vectors/async.stderr | 8 ++++ 3 files changed, 57 insertions(+) create mode 100644 tests/ui/scalable-vectors/async.rs create mode 100644 tests/ui/scalable-vectors/async.stderr diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index 761d5461a996f..25d697e23b58b 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -1856,6 +1856,11 @@ fn check_must_not_suspend_ty<'tcx>( SuspendCheckData { descr_pre: &format!("{}allocator ", data.descr_pre), ..data }, ) } + ty::Adt(def, _) if def.repr().scalable() => { + tcx.dcx() + .span_err(data.source_span, "scalable vectors cannot be held over await points"); + true + } ty::Adt(def, _) => check_must_not_suspend_def(tcx, def.did(), hir_id, data), // FIXME: support adding the attribute to TAITs ty::Alias(ty::Opaque, ty::AliasTy { def_id: def, .. }) => { diff --git a/tests/ui/scalable-vectors/async.rs b/tests/ui/scalable-vectors/async.rs new file mode 100644 index 0000000000000..44970fc86a2ff --- /dev/null +++ b/tests/ui/scalable-vectors/async.rs @@ -0,0 +1,44 @@ +//@ only-aarch64 +//@ edition:2021 + +#![allow(incomplete_features, internal_features)] +#![feature( + core_intrinsics, + simd_ffi, + rustc_attrs, + link_llvm_intrinsics +)] + +use core::intrinsics::simd::simd_reinterpret; + +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +pub struct svint32_t(i32); + +#[target_feature(enable = "sve")] +pub unsafe fn svdup_n_s32(op: i32) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} + +#[target_feature(enable = "sve")] +async fn another() -> i32 { + 42 +} + +#[no_mangle] +#[target_feature(enable = "sve")] +pub async fn test_function() { + unsafe { + let x = svdup_n_s32(1); //~ ERROR: scalable vectors cannot be held over await points + let temp = another().await; + let y: svint32_t = simd_reinterpret(x); + } +} + +fn main() { + let _ = unsafe { test_function() }; +} diff --git a/tests/ui/scalable-vectors/async.stderr b/tests/ui/scalable-vectors/async.stderr new file mode 100644 index 0000000000000..fa81c7b8ed4d1 --- /dev/null +++ b/tests/ui/scalable-vectors/async.stderr @@ -0,0 +1,8 @@ +error: scalable vectors cannot be held over await points + --> $DIR/async.rs:36:13 + | +LL | let x = svdup_n_s32(1); + | ^ + +error: aborting due to 1 previous error + From 95e7b355c78b086a03dcb634d7eb1b22b94fedd9 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 10 Jul 2025 08:46:19 +0000 Subject: [PATCH 07/27] mono: require target feature for scalable vectors Scalable vector types only work with the relevant target features enabled, so require this for any function with the types in its signature. 
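For illustration, a hypothetical reduced example of the rule (assuming an
aarch64 target where `sve` provides the scalable vector registers; the names
here are not taken from the new test):

```rust
#![feature(rustc_attrs)]
#![allow(internal_features, non_camel_case_types)]

#[rustc_scalable_vector(4)]
struct svint32_t(i32);

// OK: the signature uses a scalable vector and the definition enables `sve`.
#[target_feature(enable = "sve")]
fn id(v: svint32_t) -> svint32_t { v }

// Rejected by the monomorphization-time ABI check: same signature, but the
// `sve` target feature is not enabled for this definition, so it is reported
// as using scalable vector type `svint32_t` without the required feature.
fn id_unannotated(v: svint32_t) -> svint32_t { v }
```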
--- compiler/rustc_monomorphize/messages.ftl | 5 +- compiler/rustc_monomorphize/src/errors.rs | 2 + .../src/mono_checks/abi_check.rs | 96 ++++++++++++++----- compiler/rustc_target/src/target_features.rs | 69 ++++++++----- .../require-target-feature.rs | 40 ++++++++ .../require-target-feature.stderr | 25 +++++ 6 files changed, 186 insertions(+), 51 deletions(-) create mode 100644 tests/ui/scalable-vectors/require-target-feature.rs create mode 100644 tests/ui/scalable-vectors/require-target-feature.stderr diff --git a/compiler/rustc_monomorphize/messages.ftl b/compiler/rustc_monomorphize/messages.ftl index 9595a5b5ac7fb..92c5c53493a2a 100644 --- a/compiler/rustc_monomorphize/messages.ftl +++ b/compiler/rustc_monomorphize/messages.ftl @@ -2,7 +2,10 @@ monomorphize_abi_error_disabled_vector_type = this function {$is_call -> [true] call *[false] definition - } uses SIMD vector type `{$ty}` which (with the chosen ABI) requires the `{$required_feature}` target feature, which is not enabled{$is_call -> + } uses {$is_scalable -> + [true] scalable + *[false] SIMD + } vector type `{$ty}` which (with the chosen ABI) requires the `{$required_feature}` target feature, which is not enabled{$is_call -> [true] {" "}in the caller *[false] {""} } diff --git a/compiler/rustc_monomorphize/src/errors.rs b/compiler/rustc_monomorphize/src/errors.rs index 89a78897dea91..a7c52c14ab4e2 100644 --- a/compiler/rustc_monomorphize/src/errors.rs +++ b/compiler/rustc_monomorphize/src/errors.rs @@ -78,6 +78,8 @@ pub(crate) struct AbiErrorDisabledVectorType<'a> { pub ty: Ty<'a>, /// Whether this is a problem at a call site or at a declaration. pub is_call: bool, + /// Whether this is a problem with a fixed length vector or a scalable vector + pub is_scalable: bool, } #[derive(Diagnostic)] diff --git a/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs b/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs index b8c001d357e6c..f5f837d0a87fa 100644 --- a/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs +++ b/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs @@ -10,14 +10,37 @@ use rustc_target::callconv::{FnAbi, PassMode}; use crate::errors; -fn uses_vector_registers(mode: &PassMode, repr: &BackendRepr) -> bool { +/// Are vector registers used? +enum UsesVectorRegisters { + /// e.g. `neon` + FixedVector, + /// e.g. `sve` + ScalableVector, + No, +} + +/// Determines whether the combination of `mode` and `repr` will use fixed vector registers, +/// scalable vector registers or no vector registers. +fn uses_vector_registers(mode: &PassMode, repr: &BackendRepr) -> UsesVectorRegisters { match mode { - PassMode::Ignore | PassMode::Indirect { .. } => false, - PassMode::Cast { pad_i32: _, cast } => { - cast.prefix.iter().any(|r| r.is_some_and(|x| x.kind == RegKind::Vector)) - || cast.rest.unit.kind == RegKind::Vector + PassMode::Ignore | PassMode::Indirect { .. } => UsesVectorRegisters::No, + PassMode::Cast { pad_i32: _, cast } + if cast.prefix.iter().any(|r| r.is_some_and(|x| x.kind == RegKind::Vector)) + || cast.rest.unit.kind == RegKind::Vector => + { + UsesVectorRegisters::FixedVector + } + PassMode::Direct(..) | PassMode::Pair(..) + if matches!(repr, BackendRepr::SimdVector { .. }) => + { + UsesVectorRegisters::FixedVector } - PassMode::Direct(..) | PassMode::Pair(..) => matches!(repr, BackendRepr::SimdVector { .. }), + PassMode::Direct(..) | PassMode::Pair(..) + if matches!(repr, BackendRepr::ScalableVector { .. 
}) => + { + UsesVectorRegisters::ScalableVector + } + _ => UsesVectorRegisters::No, } } @@ -32,37 +55,60 @@ fn do_check_simd_vector_abi<'tcx>( is_call: bool, loc: impl Fn() -> (Span, HirId), ) { - let feature_def = tcx.sess.target.features_for_correct_vector_abi(); let codegen_attrs = tcx.codegen_fn_attrs(def_id); let have_feature = |feat: Symbol| { - tcx.sess.unstable_target_features.contains(&feat) - || codegen_attrs.target_features.iter().any(|x| x.name == feat) + let target_feats = tcx.sess.unstable_target_features.contains(&feat); + let fn_feats = codegen_attrs.target_features.iter().any(|x| x.name == feat); + target_feats || fn_feats }; for arg_abi in abi.args.iter().chain(std::iter::once(&abi.ret)) { let size = arg_abi.layout.size; - if uses_vector_registers(&arg_abi.mode, &arg_abi.layout.backend_repr) { - // Find the first feature that provides at least this vector size. - let feature = match feature_def.iter().find(|(bits, _)| size.bits() <= *bits) { - Some((_, feature)) => feature, - None => { + match uses_vector_registers(&arg_abi.mode, &arg_abi.layout.backend_repr) { + UsesVectorRegisters::FixedVector => { + let feature_def = tcx.sess.target.features_for_correct_fixed_length_vector_abi(); + // Find the first feature that provides at least this vector size. + let feature = match feature_def.iter().find(|(bits, _)| size.bits() <= *bits) { + Some((_, feature)) => feature, + None => { + let (span, _hir_id) = loc(); + tcx.dcx().emit_err(errors::AbiErrorUnsupportedVectorType { + span, + ty: arg_abi.layout.ty, + is_call, + }); + continue; + } + }; + if !have_feature(Symbol::intern(feature)) { let (span, _hir_id) = loc(); - tcx.dcx().emit_err(errors::AbiErrorUnsupportedVectorType { + tcx.dcx().emit_err(errors::AbiErrorDisabledVectorType { span, + required_feature: feature, ty: arg_abi.layout.ty, is_call, + is_scalable: false, }); + } + } + UsesVectorRegisters::ScalableVector => { + let Some(required_feature) = + tcx.sess.target.features_for_correct_scalable_vector_abi() + else { continue; + }; + if !have_feature(Symbol::intern(required_feature)) { + let (span, _) = loc(); + tcx.dcx().emit_err(errors::AbiErrorDisabledVectorType { + span, + required_feature, + ty: arg_abi.layout.ty, + is_call, + is_scalable: true, + }); } - }; - if !have_feature(Symbol::intern(feature)) { - // Emit error. - let (span, _hir_id) = loc(); - tcx.dcx().emit_err(errors::AbiErrorDisabledVectorType { - span, - required_feature: feature, - ty: arg_abi.layout.ty, - is_call, - }); + } + UsesVectorRegisters::No => { + continue; } } } diff --git a/compiler/rustc_target/src/target_features.rs b/compiler/rustc_target/src/target_features.rs index 297d9ed84c504..14e852789e0c9 100644 --- a/compiler/rustc_target/src/target_features.rs +++ b/compiler/rustc_target/src/target_features.rs @@ -863,17 +863,22 @@ pub fn all_rust_features() -> impl Iterator { // These arrays represent the least-constraining feature that is required for vector types up to a // certain size to have their "proper" ABI on each architecture. // Note that they must be kept sorted by vector size. -const X86_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = +const X86_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "sse"), (256, "avx"), (512, "avx512f")]; // FIXME: might need changes for AVX10. 
-const AARCH64_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "neon")]; +const AARCH64_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[(128, "neon")]; // We might want to add "helium" too. -const ARM_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "neon")]; +const ARM_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[(128, "neon")]; -const POWERPC_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "altivec")]; -const WASM_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "simd128")]; -const S390X_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "vector")]; -const RISCV_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[ +const POWERPC_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[(128, "altivec")]; +const WASM_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[(128, "simd128")]; +const S390X_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[(128, "vector")]; +const RISCV_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = &[ (32, "zvl32b"), (64, "zvl64b"), (128, "zvl128b"), @@ -888,13 +893,16 @@ const RISCV_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[ (65536, "zvl65536b"), ]; // Always error on SPARC, as the necessary target features cannot be enabled in Rust at the moment. -const SPARC_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[/*(64, "vis")*/]; +const SPARC_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[/*(64, "vis")*/]; -const HEXAGON_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = +const HEXAGON_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = &[/*(512, "hvx-length64b"),*/ (1024, "hvx-length128b")]; -const MIPS_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "msa")]; -const CSKY_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "vdspv1")]; -const LOONGARCH_FEATURES_FOR_CORRECT_VECTOR_ABI: &'static [(u64, &'static str)] = +const MIPS_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[(128, "msa")]; +const CSKY_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = + &[(128, "vdspv1")]; +const LOONGARCH_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI: &'static [(u64, &'static str)] = &[(128, "lsx"), (256, "lasx")]; #[derive(Copy, Clone, Debug)] @@ -927,27 +935,38 @@ impl Target { } } - pub fn features_for_correct_vector_abi(&self) -> &'static [(u64, &'static str)] { + pub fn features_for_correct_fixed_length_vector_abi(&self) -> &'static [(u64, &'static str)] { match &*self.arch { - "x86" | "x86_64" => X86_FEATURES_FOR_CORRECT_VECTOR_ABI, - "aarch64" | "arm64ec" => AARCH64_FEATURES_FOR_CORRECT_VECTOR_ABI, - "arm" => ARM_FEATURES_FOR_CORRECT_VECTOR_ABI, - "powerpc" | "powerpc64" => POWERPC_FEATURES_FOR_CORRECT_VECTOR_ABI, - "loongarch32" | "loongarch64" => LOONGARCH_FEATURES_FOR_CORRECT_VECTOR_ABI, - "riscv32" | "riscv64" => RISCV_FEATURES_FOR_CORRECT_VECTOR_ABI, - "wasm32" | "wasm64" => WASM_FEATURES_FOR_CORRECT_VECTOR_ABI, - "s390x" => S390X_FEATURES_FOR_CORRECT_VECTOR_ABI, - "sparc" | "sparc64" => SPARC_FEATURES_FOR_CORRECT_VECTOR_ABI, - "hexagon" => HEXAGON_FEATURES_FOR_CORRECT_VECTOR_ABI, - "mips" | "mips32r6" | 
"mips64" | "mips64r6" => MIPS_FEATURES_FOR_CORRECT_VECTOR_ABI, + "x86" | "x86_64" => X86_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "aarch64" | "arm64ec" => AARCH64_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "arm" => ARM_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "powerpc" | "powerpc64" => POWERPC_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "loongarch32" | "loongarch64" => LOONGARCH_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "riscv32" | "riscv64" => RISCV_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "wasm32" | "wasm64" => WASM_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "s390x" => S390X_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "sparc" | "sparc64" => SPARC_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "hexagon" => HEXAGON_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, + "mips" | "mips32r6" | "mips64" | "mips64r6" => { + MIPS_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI + } "nvptx64" | "bpf" | "m68k" => &[], // no vector ABI - "csky" => CSKY_FEATURES_FOR_CORRECT_VECTOR_ABI, + "csky" => CSKY_FEATURES_FOR_CORRECT_FIXED_LENGTH_VECTOR_ABI, // FIXME: for some tier3 targets, we are overly cautious and always give warnings // when passing args in vector registers. _ => &[], } } + pub fn features_for_correct_scalable_vector_abi(&self) -> Option<&'static str> { + match &*self.arch { + "aarch64" | "arm64ec" => Some("sve"), + "riscv32" | "riscv64" => todo!(), + // Other targets have no scalable vectors. + _ => None, + } + } + pub fn tied_target_features(&self) -> &'static [&'static [&'static str]] { match &*self.arch { "aarch64" | "arm64ec" => AARCH64_TIED_FEATURES, diff --git a/tests/ui/scalable-vectors/require-target-feature.rs b/tests/ui/scalable-vectors/require-target-feature.rs new file mode 100644 index 0000000000000..b3c1d3e510077 --- /dev/null +++ b/tests/ui/scalable-vectors/require-target-feature.rs @@ -0,0 +1,40 @@ +//@ build-fail +//@ compile-flags: --crate-type=lib +//@ only-aarch64 +#![allow(incomplete_features, internal_features)] +#![feature( + simd_ffi, + rustc_attrs, + link_llvm_intrinsics +)] + +#[derive(Copy, Clone)] +#[rustc_scalable_vector(4)] +#[allow(non_camel_case_types)] +pub struct svint32_t(i32); + +#[inline(never)] +#[target_feature(enable = "sve")] +pub unsafe fn svdup_n_s32(op: i32) -> svint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; +//~^ WARN: `extern` block uses type `svint32_t`, which is not FFI-safe + } + unsafe { _svdup_n_s32(op) } +} + +pub fn non_annotated_callee(x: svint32_t) {} +//~^ ERROR: this function definition uses scalable vector type `svint32_t` + +#[target_feature(enable = "sve")] +pub fn annotated_callee(x: svint32_t) {} // okay! 
+ +#[target_feature(enable = "sve")] +pub fn caller() { + unsafe { + let a = svdup_n_s32(42); + non_annotated_callee(a); + annotated_callee(a); + } +} diff --git a/tests/ui/scalable-vectors/require-target-feature.stderr b/tests/ui/scalable-vectors/require-target-feature.stderr new file mode 100644 index 0000000000000..85b9e5b6579ce --- /dev/null +++ b/tests/ui/scalable-vectors/require-target-feature.stderr @@ -0,0 +1,25 @@ +warning: `extern` block uses type `svint32_t`, which is not FFI-safe + --> $DIR/require-target-feature.rs:21:37 + | +LL | fn _svdup_n_s32(op: i32) -> svint32_t; + | ^^^^^^^^^ not FFI-safe + | + = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct + = note: this struct has unspecified layout +note: the type is defined here + --> $DIR/require-target-feature.rs:14:1 + | +LL | pub struct svint32_t(i32); + | ^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(improper_ctypes)]` on by default + +error: this function definition uses scalable vector type `svint32_t` which (with the chosen ABI) requires the `sve` target feature, which is not enabled + --> $DIR/require-target-feature.rs:27:1 + | +LL | pub fn non_annotated_callee(x: svint32_t) {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ function defined here + | + = help: consider enabling it globally (`-C target-feature=+sve`) or locally (`#[target_feature(enable="sve")]`) + +error: aborting due to 1 previous error; 1 warning emitted + From 26fb3aee6b9db94fd3f0f20441af1c1480b8433e Mon Sep 17 00:00:00 2001 From: David Wood Date: Mon, 14 Jul 2025 12:30:36 +0000 Subject: [PATCH 08/27] rust-analyzer: `rustc_scalable_vector` Trivial changes to rust-analyzer to keep it compiling with changes to `ReprOptions`. --- .../rust-analyzer/crates/hir-def/src/attr.rs | 27 ++++++++++++------- .../rust-analyzer/crates/hir-ty/src/layout.rs | 13 ++++++--- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/src/tools/rust-analyzer/crates/hir-def/src/attr.rs b/src/tools/rust-analyzer/crates/hir-def/src/attr.rs index 53250510f875c..c0ab09135a4e6 100644 --- a/src/tools/rust-analyzer/crates/hir-def/src/attr.rs +++ b/src/tools/rust-analyzer/crates/hir-def/src/attr.rs @@ -6,30 +6,30 @@ use base_db::Crate; use cfg::{CfgExpr, CfgOptions}; use either::Either; use hir_expand::{ - HirFileId, InFile, - attrs::{Attr, AttrId, RawAttrs, collect_attrs}, + attrs::{collect_attrs, Attr, AttrId, RawAttrs}, span_map::SpanMapRef, + HirFileId, InFile, }; -use intern::{Symbol, sym}; +use intern::{sym, Symbol}; use la_arena::{ArenaMap, Idx, RawIdx}; use mbe::DelimiterKind; use rustc_abi::ReprOptions; use span::AstIdNode; use syntax::{ - AstPtr, ast::{self, HasAttrs}, + AstPtr, }; use triomphe::Arc; use tt::iter::{TtElement, TtIter}; use crate::{ - AdtId, AstIdLoc, AttrDefId, GenericParamId, HasModule, LocalFieldId, Lookup, MacroId, - VariantId, db::DefDatabase, item_tree::block_item_tree_query, lang_item::LangItem, nameres::{ModuleOrigin, ModuleSource}, src::{HasChildSource, HasSource}, + AdtId, AstIdLoc, AttrDefId, GenericParamId, HasModule, LocalFieldId, Lookup, MacroId, + VariantId, }; /// Desugared attributes of an item post `cfg_attr` expansion. 
@@ -199,7 +199,11 @@ impl Attrs { #[inline] pub(crate) fn is_cfg_enabled(&self, cfg_options: &CfgOptions) -> Result<(), CfgExpr> { self.cfgs().try_for_each(|cfg| { - if cfg_options.check(&cfg) != Some(false) { Ok(()) } else { Err(cfg) } + if cfg_options.check(&cfg) != Some(false) { + Ok(()) + } else { + Err(cfg) + } }) } @@ -331,7 +335,7 @@ fn parse_rustc_legacy_const_generics(tt: &crate::tt::TopSubtree) -> Box<[u32]> { } fn merge_repr(this: &mut ReprOptions, other: ReprOptions) { - let ReprOptions { int, align, pack, flags, field_shuffle_seed: _ } = this; + let ReprOptions { int, align, pack, flags, scalable, field_shuffle_seed: _ } = this; flags.insert(other.flags); *align = (*align).max(other.align); *pack = match (*pack, other.pack) { @@ -341,6 +345,9 @@ fn merge_repr(this: &mut ReprOptions, other: ReprOptions) { if other.int.is_some() { *int = other.int; } + if other.scalable.is_some() { + *scalable = other.scalable; + } } fn parse_repr_tt(tt: &crate::tt::TopSubtree) -> Option { @@ -852,8 +859,8 @@ mod tests { use hir_expand::span_map::{RealSpanMap, SpanMap}; use span::FileId; - use syntax::{AstNode, TextRange, ast}; - use syntax_bridge::{DocCommentDesugarMode, syntax_node_to_token_tree}; + use syntax::{ast, AstNode, TextRange}; + use syntax_bridge::{syntax_node_to_token_tree, DocCommentDesugarMode}; use crate::attr::{DocAtom, DocExpr}; diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs index 107da6a5af6d6..3692119aabded 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs @@ -4,11 +4,11 @@ use std::fmt; use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy}; use hir_def::{ - LocalFieldId, StructId, layout::{ Float, Integer, LayoutCalculator, LayoutCalculatorError, LayoutData, Primitive, ReprOptions, Scalar, StructKind, TargetDataLayout, WrappingRange, }, + LocalFieldId, StructId, }; use la_arena::{Idx, RawIdx}; use rustc_abi::AddressSpace; @@ -17,11 +17,11 @@ use rustc_index::IndexVec; use triomphe::Arc; use crate::{ - Interner, ProjectionTy, Substitution, TraitEnvironment, Ty, consteval::try_const_usize, db::{HirDatabase, InternedClosure}, infer::normalize, utils::ClosureSubst, + Interner, ProjectionTy, Substitution, TraitEnvironment, Ty, }; pub(crate) use self::adt::layout_of_adt_cycle_result; @@ -168,7 +168,14 @@ pub fn layout_of_ty_query( let data = db.struct_signature(*s); let repr = data.repr.unwrap_or_default(); if repr.simd() { - return layout_of_simd_ty(db, *s, repr.packed(), subst, trait_env, &target); + return layout_of_simd_ty( + db, + *s, + repr.packed(), + subst, + trait_env, + &target, + ); } }; return db.layout_of_adt(*def, subst.clone(), trait_env); From 75458977661a98942fa84d53a4c6d1791c7af2a7 Mon Sep 17 00:00:00 2001 From: wxh Date: Fri, 31 Oct 2025 10:07:00 +0800 Subject: [PATCH 09/27] Add SVE intrinsics for AArch64 architecture This commit introduces a new file containing a comprehensive set of SVE (Scalable Vector Extension) intrinsics for the AArch64 architecture. The intrinsics include functions for absolute comparisons (greater than, less than, equal to) and arithmetic operations (addition, subtraction) for various data types (float, int) and their respective vector representations. Each function is annotated with documentation links to Arm's official documentation and includes test assertions for validation. 
The addition of these intrinsics enhances the support for scalable vector types in Rust, aligning with recent changes to the compiler and standard library to accommodate scalable SIMD operations. Co-authored-by: Jamie Cunliffe --- .../crates/core_arch/src/aarch64/sve/sve.rs | 4861 +++++++++++++++++ 1 file changed, 4861 insertions(+) create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs new file mode 100644 index 0000000000000..4622fee0e6e13 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -0,0 +1,4861 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec +// ``` +#![allow(improper_ctypes)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; + +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")] + fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svacge_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")] + fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svacge_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")] + fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svacgt_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")] + fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svacgt_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Absolute compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacge_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacle_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacge_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
+#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacle_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacgt_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svaclt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacgt_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svaclt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")] + fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svadd_f32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")] + fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svadd_f64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")] + fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svadd_s8_m(pg, op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")] + fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: 
svint16_t) -> svint16_t; + } + unsafe { _svadd_s16_m(simd_cast(pg), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")] + fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svadd_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) 
-> svint32_t { + svadd_s32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")] + fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svadd_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, 
svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> 
svuint64_t { + svadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv16i8")] + fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svand_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")] + fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svand_s16_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")] + fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svand_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")] + fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svand_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), 
op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", 
issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")] + fn _svcadd_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcadd_f32_m(simd_cast(pg), op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::(pg, op1, op2) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")] + fn _svcadd_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; + } + unsafe { _svcadd_f64_m(simd_cast(pg), op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::(pg, op1, op2) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_z( + pg: 
svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")] + fn _svcmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_f32_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")] + fn _svcmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; + } + unsafe { _svcmla_f64_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32" + )] + fn _svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")] + fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpeq_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpeq_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")] + fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpeq_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpeq_f64(pg, 
op1, svdup_n_f64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] + fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpeq_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] + fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { simd_cast(_svcmpeq_s16(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] + fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpeq_s32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s64(pg: svbool_t, op1: 
svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] + fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpeq_s64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[inline] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] + fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpgt_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] + fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpgt_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] + fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] + fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { simd_cast(_svcmpgt_s16(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] + fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpgt_s32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] + fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpgt_s64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: 
i64) -> svbool_t { + svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpgt_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpgt_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpgt_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpgt_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u64(pg: svbool_t, op1: 
svuint64_t, op2: u64) -> svbool_t { + svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpgt_f32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmplt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpgt_f64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmplt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpgt_s8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmplt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, simd_cast(pg), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s64_m(inactive, simd_cast(pg), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, simd_cast(pg), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u64_m(inactive, simd_cast(pg), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, simd_cast(pg), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, simd_cast(pg), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, simd_cast(pg), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, simd_cast(pg), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = 
"Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn _svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] 
+pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(simd_cast(pg), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1_f64(simd_cast(pg), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1_s8(pg, base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1_s16(simd_cast(pg), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1_s32(simd_cast(pg), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(simd_cast(pg), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] + fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmul_f32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + 
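+    // Editorial note (not generated): this `_n_` wrapper broadcasts the scalar operand
+    // with `svdup_n_f32` and forwards to the vector form `svmul_f32_m` defined just
+    // above. Throughout this file the `_m`/`_x`/`_z` suffixes select merging,
+    // "don't care" and zeroing predication; the zeroing form, for instance, is defined
+    // a few lines below as
+    //     svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)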
svmul_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] + fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmul_f64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn 
svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmul_s8_m(pg, op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub 
fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmul_s16_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")] + fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmul_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, 
assert_instr(mul))] +pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, 
svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svorr_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: 
svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svorr_s16_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] 
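+// Editorial note (illustrative, not part of the generated bindings): the `svorr_n_*`
+// forms such as `svorr_n_s32_m` below OR a broadcast scalar into the active lanes only.
+// Assuming a predicate `pg` and a vector `words` (both hypothetical here), setting
+// bit 31 of the active lanes while leaving inactive lanes untouched could look like
+//
+//     let tagged = svorr_n_s32_m(pg, words, i32::MIN); // merging form: inactive lanes keep `words`
+//
+// The unsigned variants further below (`svorr_u*`) are thin wrappers that reuse these
+// signed implementations via `as_signed`/`as_unsigned`.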
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: 
svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), 
op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, op1, op2) +} +#[doc 
= "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + } + unsafe { simd_cast(_svptrue_pat_b16(PATTERN)) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { simd_cast(_svptrue_pat_b32(PATTERN)) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn 
svptrue_pat_b64() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] + fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + } + unsafe { simd_cast(_svptrue_pat_b64(PATTERN)) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] + fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst1_f32(data, simd_cast(pg), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] + fn _svst1_f64(data: svfloat64_t, pg:
svbool2_t, ptr: *mut f64); + } + _svst1_f64(data, simd_cast(pg), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst1_s8(data, pg, base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst1_s16(data, simd_cast(pg), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst1_s32(data, simd_cast(pg), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst1_s64(data, simd_cast(pg), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svst1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svst1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svst1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svst1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] + fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsub_f32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] + fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsub_f64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsub_s8_m(pg, op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsub_s16_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsub_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, 
op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) +} From 4c35dbc6bb5da81700b16b08031aa3b1a84fa4f0 Mon Sep 17 00:00:00 2001 From: wxh Date: Fri, 31 Oct 2025 10:11:55 +0800 Subject: [PATCH 10/27] Add SVE module and types for AArch64 architecture This commit introduces a new module for Scalable Vector Extension (SVE) intrinsics specific to the AArch64 architecture. It includes the implementation of scalar type conversions, SIMD functions, and core SVE types such as `svbool_t`, `svint8_t`, and their variants. The new files `mod.rs`, `sve2.rs`, and `types.rs` provide foundational support for SVE operations, enhancing the Rust standard library's capabilities for scalable SIMD programming. The implementation includes traits for scalar conversion and type casting, ensuring compatibility with various data types. This addition lays the groundwork for future enhancements and optimizations in SIMD operations within Rust. 
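As a rough usage sketch (illustrative only, not part of the generated sources; it assumes the svorr_s8_x intrinsic and the AsSigned/AsUnsigned traits introduced by this series are in scope), the conversion traits are what let the unsigned intrinsics forward to their signed counterparts:

    // Hypothetical example following the pattern used by the generated code in
    // sve.rs: reinterpret the unsigned vectors as signed, call the signed
    // intrinsic, then reinterpret the result back to unsigned.
    #[target_feature(enable = "sve")]
    pub fn example_orr_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
        unsafe { svorr_s8_x(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
    }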
Co-authored-by: Jamie Cunliffe --- .../crates/core_arch/src/aarch64/sve/mod.rs | 153 ++++++ .../crates/core_arch/src/aarch64/sve/sve2.rs | 9 + .../crates/core_arch/src/aarch64/sve/types.rs | 515 ++++++++++++++++++ 3 files changed, 677 insertions(+) create mode 100755 library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/types.rs diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs new file mode 100755 index 0000000000000..afc896bf853cd --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -0,0 +1,153 @@ +#![allow(unused_unsafe)] + +mod sve; +mod sve2; +mod types; + +// 导出辅助函数 +#[inline(always)] +pub(crate) unsafe fn simd_reinterpret(x: T) -> U { + crate::intrinsics::simd::simd_cast(x) +} + +#[inline(always)] +pub(crate) unsafe fn simd_cast(x: T) -> U { + crate::intrinsics::simd::simd_cast(x) +} + +#[inline(always)] +pub(crate) unsafe fn simd_select(m: M, a: T, b: T) -> T { + crate::intrinsics::simd::simd_select(m, a, b) +} + +// 标量类型转换 Trait(用于生成代码中的类型转换) +trait ScalarConversion: Sized { + type Unsigned; + type Signed; + fn as_unsigned(self) -> Self::Unsigned; + fn as_signed(self) -> Self::Signed; +} + +// 基本整数类型实现 +impl ScalarConversion for i8 { + type Unsigned = u8; + type Signed = i8; + #[inline(always)] + fn as_unsigned(self) -> u8 { self as u8 } + #[inline(always)] + fn as_signed(self) -> i8 { self } +} + +impl ScalarConversion for u8 { + type Unsigned = u8; + type Signed = i8; + #[inline(always)] + fn as_unsigned(self) -> u8 { self } + #[inline(always)] + fn as_signed(self) -> i8 { self as i8 } +} + +impl ScalarConversion for i16 { + type Unsigned = u16; + type Signed = i16; + #[inline(always)] + fn as_unsigned(self) -> u16 { self as u16 } + #[inline(always)] + fn as_signed(self) -> i16 { self } +} + +impl ScalarConversion for u16 { + type Unsigned = u16; + type Signed = i16; + #[inline(always)] + fn as_unsigned(self) -> u16 { self } + #[inline(always)] + fn as_signed(self) -> i16 { self as i16 } +} + +impl ScalarConversion for i32 { + type Unsigned = u32; + type Signed = i32; + #[inline(always)] + fn as_unsigned(self) -> u32 { self as u32 } + #[inline(always)] + fn as_signed(self) -> i32 { self } +} + +impl ScalarConversion for u32 { + type Unsigned = u32; + type Signed = i32; + #[inline(always)] + fn as_unsigned(self) -> u32 { self } + #[inline(always)] + fn as_signed(self) -> i32 { self as i32 } +} + +impl ScalarConversion for i64 { + type Unsigned = u64; + type Signed = i64; + #[inline(always)] + fn as_unsigned(self) -> u64 { self as u64 } + #[inline(always)] + fn as_signed(self) -> i64 { self } +} + +impl ScalarConversion for u64 { + type Unsigned = u64; + type Signed = i64; + #[inline(always)] + fn as_unsigned(self) -> u64 { self } + #[inline(always)] + fn as_signed(self) -> i64 { self as i64 } +} + +// 指针类型实现 - 分别为有符号和无符号指针实现 +macro_rules! 
impl_scalar_conversion_for_ptr { + ($(($unsigned:ty, $signed:ty)),*) => {$( + impl ScalarConversion for *const $unsigned { + type Unsigned = *const $unsigned; + type Signed = *const $signed; + #[inline(always)] + fn as_unsigned(self) -> *const $unsigned { self } + #[inline(always)] + fn as_signed(self) -> *const $signed { self as *const $signed } + } + + impl ScalarConversion for *const $signed { + type Unsigned = *const $unsigned; + type Signed = *const $signed; + #[inline(always)] + fn as_unsigned(self) -> *const $unsigned { self as *const $unsigned } + #[inline(always)] + fn as_signed(self) -> *const $signed { self } + } + + impl ScalarConversion for *mut $unsigned { + type Unsigned = *mut $unsigned; + type Signed = *mut $signed; + #[inline(always)] + fn as_unsigned(self) -> *mut $unsigned { self } + #[inline(always)] + fn as_signed(self) -> *mut $signed { self as *mut $signed } + } + + impl ScalarConversion for *mut $signed { + type Unsigned = *mut $unsigned; + type Signed = *mut $signed; + #[inline(always)] + fn as_unsigned(self) -> *mut $unsigned { self as *mut $unsigned } + #[inline(always)] + fn as_signed(self) -> *mut $signed { self } + } + )*}; +} + +impl_scalar_conversion_for_ptr!((u8, i8), (u16, i16), (u32, i32), (u64, i64)); + +// 导出所有类型和函数 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub use sve::*; +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub use sve2::*; +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub use types::*; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs new file mode 100644 index 0000000000000..a1a8468910288 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs @@ -0,0 +1,9 @@ +// 最小化 SVE2 intrinsics - 用于编译测试 +#![allow(improper_ctypes)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; + +// SVE2 intrinsics placeholder diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs new file mode 100644 index 0000000000000..c40443cbe4c83 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs @@ -0,0 +1,515 @@ +#![allow(non_camel_case_types)] + +// ============================================================================ +// 核心SVE类型定义 - 最小化版本用于编译测试 +// ============================================================================ + +/// SVE谓词类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(1)] +pub struct svbool_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool_t { + fn clone(&self) -> Self { *self } +} + +/// SVE双宽度谓词类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(2)] +pub struct svbool2_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE四宽度谓词类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(4)] +pub struct svbool4_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool4_t { + fn clone(&self) -> Self { 
*self } +} + +/// SVE八宽度谓词类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svbool8_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svbool8_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svbool8_t { + fn clone(&self) -> Self { *self } +} + +// ============================================================================ +// SVE 向量类型定义 +// ============================================================================ + +/// SVE 8位有符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svint8_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位有符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svint16_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位有符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(4)] +pub struct svint32_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位有符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(2)] +pub struct svint64_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 8位无符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svuint8_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位无符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svuint16_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位无符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(4)] +pub struct svuint32_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位无符号整数向量 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(2)] +pub struct svuint64_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64_t { + fn clone(&self) -> Self { *self } +} 
+
+/// SVE vector of 32-bit floats
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[repr(C)]
+#[rustc_scalable_vector(4)]
+pub struct svfloat32_t(f32);
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl Copy for svfloat32_t {}
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl Clone for svfloat32_t {
+    fn clone(&self) -> Self { *self }
+}
+
+/// SVE vector of 64-bit floats
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[repr(C)]
+#[rustc_scalable_vector(2)]
+pub struct svfloat64_t(f64);
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl Copy for svfloat64_t {}
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl Clone for svfloat64_t {
+    fn clone(&self) -> Self { *self }
+}
+
+/// SVE vector of 16-bit floats (uses f32 as the underlying element type)
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[repr(C)]
+#[rustc_scalable_vector(8)]
+pub struct svfloat16_t(f32);
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl Copy for svfloat16_t {}
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl Clone for svfloat16_t {
+    fn clone(&self) -> Self { *self }
+}
+
+// ============================================================================
+// SVE auxiliary types
+// ============================================================================
+
+/// SVE pattern type
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)]
+pub struct svpattern(u8);
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl svpattern {
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_ALL: svpattern = svpattern(31);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL1: svpattern = svpattern(1);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL2: svpattern = svpattern(2);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL3: svpattern = svpattern(3);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL4: svpattern = svpattern(4);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL5: svpattern = svpattern(5);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL6: svpattern = svpattern(6);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL7: svpattern = svpattern(7);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL8: svpattern = svpattern(8);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL16: svpattern = svpattern(9);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL32: svpattern = svpattern(10);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL64: svpattern = svpattern(11);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL128: svpattern = svpattern(12);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_VL256: svpattern = svpattern(13);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_POW2: svpattern = svpattern(30);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_MUL4: svpattern = svpattern(29);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_MUL3: svpattern = svpattern(28);
+}
+
+/// SVE prefetch operation type
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)]
+pub struct svprfop(u8);
+
+// ============================================================================
+// Type-conversion helpers (internal use only)
+// ============================================================================
+
+#[inline]
+pub(crate) unsafe fn simd_cast<T, U>(x: T) -> U {
+    crate::intrinsics::simd::simd_cast(x)
+}
+
+// ============================================================================
+// From-like conversion methods - suitable for cross-compilation
+// ============================================================================
+
+// Approach: use associated functions to provide a From::from-style API.
+// These methods carry #[target_feature(enable = "sve")] and therefore work when cross-compiling.
+
+/// Conversion methods for svbool_t
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl svbool_t {
+    /// Convert to svbool2_t
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn into_svbool2(self) -> svbool2_t {
+        simd_cast(self)
+    }
+
+    /// Convert to svbool4_t
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn into_svbool4(self) -> svbool4_t {
+        simd_cast(self)
+    }
+
+    /// Convert to svbool8_t
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn into_svbool8(self) -> svbool8_t {
+        simd_cast(self)
+    }
+}
+
+/// Conversion methods for svbool2_t
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl svbool2_t {
+    /// Create from an svbool_t (like From::from)
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn from_svbool(x: svbool_t) -> Self {
+        simd_cast(x)
+    }
+
+    /// Convert to svbool_t
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn into_svbool(self) -> svbool_t {
+        simd_cast(self)
+    }
+}
+
+/// Conversion methods for svbool4_t
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl svbool4_t {
+    /// Create from an svbool_t (like From::from)
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn from_svbool(x: svbool_t) -> Self {
+        simd_cast(x)
+    }
+
+    /// Convert to svbool_t
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn into_svbool(self) -> svbool_t {
+        simd_cast(self)
+    }
+}
+
+/// Conversion methods for svbool8_t
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl svbool8_t {
+    /// Create from an svbool_t (like From::from)
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn from_svbool(x: svbool_t) -> Self {
+        simd_cast(x)
+    }
+
+    /// Convert to svbool_t
+    #[inline]
+    #[target_feature(enable = "sve")]
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub unsafe fn into_svbool(self) -> svbool_t {
+        simd_cast(self)
+    }
+}
+
+// ============================================================================
+// Type-conversion traits - used by generated code
+// ============================================================================
+
+/// Convert to the unsigned vector type
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub trait AsUnsigned {
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    type Unsigned;
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    fn as_unsigned(self) -> Self::Unsigned;
+}
+
+/// Convert to the signed vector type
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub trait AsSigned {
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    type Signed;
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    fn as_signed(self) -> Self::Signed;
+}
+
+// Implement the conversion traits for all SVE integer types
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svuint8_t {
+    type Unsigned = svuint8_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svuint8_t {
+    type Signed = svint8_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint8_t {
+    type Unsigned = svuint8_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint8_t {
+    type Signed = svint8_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svuint16_t {
+    type Unsigned = svuint16_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svuint16_t {
+    type Signed = svint16_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint16_t {
+    type Unsigned = svuint16_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint16_t {
+    type Signed = svint16_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svuint32_t {
+    type Unsigned = svuint32_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svuint32_t {
+    type Signed = svint32_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint32_t {
+    type Unsigned = svuint32_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint32_t {
+    type Signed = svint32_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svuint64_t {
+    type Unsigned = svuint64_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svuint64_t {
+    type Signed = svint64_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint64_t {
+    type Unsigned = svuint64_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint64_t {
+    type Signed = svint64_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
+
+// ============================================================================
From cf3328e5950674777e3fe8d24e8ca3410836809d Mon Sep 17 00:00:00 2001
From: wxh
Date: Fri, 7 Nov 2025 11:48:06 +0800
Subject: [PATCH 11/27] Reference the SVE types directly (no function
 references yet); compiles
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../crates/core_arch/src/aarch64/sve/mod.rs   |  3 +-
 tests/codegen-llvm/scalable-vectors/simple.rs | 88 +++++++++++++++++--
 2 files changed, 85 insertions(+), 6 deletions(-)

diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs
index afc896bf853cd..20b5e185e4570 100755
--- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs
@@ -2,7 +2,8 @@
 mod sve;
 mod sve2;
-mod types;
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub mod types;
 
 // Export helper functions
 #[inline(always)]
diff --git a/tests/codegen-llvm/scalable-vectors/simple.rs b/tests/codegen-llvm/scalable-vectors/simple.rs
index 9706b4acab340..76965d0331138 100644
--- a/tests/codegen-llvm/scalable-vectors/simple.rs
+++ b/tests/codegen-llvm/scalable-vectors/simple.rs
@@ -2,12 +2,30 @@
 //@ only-aarch64
 #![crate_type = "lib"]
 #![allow(incomplete_features, internal_features)]
-#![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics)]
+#![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics, stdarch_aarch64_sve)]
 
-#[derive(Copy, Clone)]
-#[rustc_scalable_vector(4)]
-#[allow(non_camel_case_types)]
-pub struct svint32_t(i32);
+// ============================================================================
+// Demonstrate the various ways to import the SVE types
+// ============================================================================
+
+// Option 1: import all types directly from the aarch64 module (recommended; the types are re-exported)
+use std::arch::aarch64::{svint32_t, svint64_t, svfloat32_t, svuint32_t};
+
+// Option 1b: import all types through the sve module (requires the sve module to be publicly exported)
+// use std::arch::aarch64::sve::types::*;
+
+// Option 2: import the types module and use module paths
+// use std::arch::aarch64::sve::types;
+// e.g. types::svint32_t, types::svint64_t, ...
+
+// Option 3: import specific types from the types module
+// use std::arch::aarch64::sve::types::{svint32_t, svint64_t, svfloat32_t};
+
+// Option 4: import the types directly (the original approach; still works)
+// use std::arch::aarch64::{svint32_t, svint64_t, svfloat32_t};
+
+// Option 5: import through the aarch64 module (the sve module is re-exported)
+// use std::arch::aarch64::types::*;
 
 #[inline(never)]
 #[target_feature(enable = "sve")]
 pub unsafe fn svdup_n_s32(op: i32) -> svint32_t {
     extern "C" {
         #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
         fn _svdup_n_s32(op: i32) -> svint32_t;
     }
     unsafe { _svdup_n_s32(op) }
 }
@@ -47,3 +65,63 @@ pub unsafe fn test() -> svint32_t {
     // CHECK: %_0 = call <vscale x 4 x i32> @pass_as_ref(ptr noalias noundef nonnull readonly align 16 dereferenceable(16) %a, <vscale x 4 x i32> %b)
     pass_as_ref(&a, b)
 }
+
+// ============================================================================
+// Examples using different types (showing the convenience of importing the types module)
+// ============================================================================
+
+// Example: using svint64_t
+#[inline(never)]
+#[target_feature(enable = "sve")]
+pub unsafe fn svdup_n_s64(op: i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")]
+        fn _svdup_n_s64(op: i64) -> svint64_t;
+    }
+    unsafe { _svdup_n_s64(op) }
+}
+
+// Example: using svfloat32_t
+#[inline(never)]
+#[target_feature(enable = "sve")]
+pub unsafe fn svdup_n_f32(op: f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.dup.x.nxv4f32")] + fn _svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} + +// 示例:混合使用多种类型(注意:可扩展向量类型不能作为元组字段) +// 因此我们分别创建三个函数来演示不同类型的使用 +#[no_mangle] +#[target_feature(enable = "sve")] +pub unsafe fn test_multiple_types_i32() -> svint32_t { + let i32_vec = svdup_n_s32(42); + i32_vec +} + +#[no_mangle] +#[target_feature(enable = "sve")] +pub unsafe fn test_multiple_types_i64() -> svint64_t { + let i64_vec = svdup_n_s64(100); + i64_vec +} + +#[no_mangle] +#[target_feature(enable = "sve")] +pub unsafe fn test_multiple_types_f32() -> svfloat32_t { + let f32_vec = svdup_n_f32(3.14); + f32_vec +} + +// 示例:使用 svuint32_t 类型(展示无符号类型) +#[inline(never)] +#[target_feature(enable = "sve")] +pub unsafe fn svdup_n_u32(op: u32) -> svuint32_t { + extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_u32(op: u32) -> svuint32_t; + } + unsafe { _svdup_n_u32(op) } +} From f8de7cd20a69c10603afe75781776bff3e35a868 Mon Sep 17 00:00:00 2001 From: wxh Date: Wed, 12 Nov 2025 09:45:04 +0800 Subject: [PATCH 12/27] =?UTF-8?q?=E5=AE=9E=E7=8E=B0=E4=BA=86=E7=9B=B4?= =?UTF-8?q?=E6=8E=A5=E5=BC=95=E7=94=A8SVE=E7=B1=BB=E5=9E=8B=E5=92=8CIntrin?= =?UTF-8?q?sic=E5=87=BD=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/mod.rs | 5 + .../crates/core_arch/src/aarch64/sve/mod.rs | 208 ++++++++++++++++-- .../crates/core_arch/src/aarch64/sve/types.rs | 9 +- tests/codegen-llvm/scalable-vectors/simple.rs | 109 ++++----- 4 files changed, 244 insertions(+), 87 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/mod.rs index f4b9b1c30251e..0a71fa5a2f92d 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/mod.rs @@ -13,6 +13,11 @@ allow(aarch64_softfloat_neon) )] +#[unstable(feature = "stdarch_aarch64_sve", issue = "99999999")] +pub mod sve; +#[unstable(feature = "stdarch_aarch64_sve", issue = "99999999")] +pub use self::sve::*; + mod mte; #[unstable(feature = "stdarch_aarch64_mte", issue = "129010")] pub use self::mte::*; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs index 20b5e185e4570..c61a5e21db826 100755 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -5,23 +5,183 @@ mod sve2; #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub mod types; -// 导出辅助函数 +// ================================ +// 修复点 1/2:去掉 simd_*,改为位级转换 +// ================================ #[inline(always)] pub(crate) unsafe fn simd_reinterpret(x: T) -> U { - crate::intrinsics::simd::simd_cast(x) + // 纯位级重解释;SVE 封装类型在这层视为opaque,避免走 simd_cast 触发 E0511 + core::mem::transmute_copy::(&x) } #[inline(always)] pub(crate) unsafe fn simd_cast(x: T) -> U { - crate::intrinsics::simd::simd_cast(x) + // 多数 SVE “cast”在 stdarch 内部只是布局相同的重解释;按位转即可 + // 如需数值语义转换,请在具体 API 内对接相应 LLVM SVE convert 内建。 + core::mem::transmute_copy::(&x) } +// ================================ +// 修复点 3/3:逐类型绑定 LLVM SVE `sel` 内建,替代 simd_select +// 说明:SVE 的“按谓词选择”在 LLVM 里是 aarch64.sve.sel.* 内建, +// 名字与元素类型/宽度对应,如:nxv16i8/nxv8i16/nxv4i32/nxv2i64、nxv4f32/nxv2f64。 +// 这是最稳妥的做法,避免把非SIMD类型喂给 simd_select 触发 E0511。 +// ================================ +use types::*; + +// 用 trait 把选择操作"静态分派"到对应的 LLVM SVE sel 内建上 +pub(crate) trait 
__SveSelect {
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self;
+}
+
+// Declare the LLVM intrinsics (each unique element-type suffix is declared only
+// once, so the signed and unsigned variants reuse the same declaration)
+unsafe extern "C" {
+    #[link_name = "llvm.aarch64.sve.sel.nxv16i8"]
+    fn __llvm_sve_sel_nxv16i8(mask: svbool_t, a: svint8_t, b: svint8_t) -> svint8_t;
+
+    #[link_name = "llvm.aarch64.sve.sel.nxv8i16"]
+    fn __llvm_sve_sel_nxv8i16(mask: svbool_t, a: svint16_t, b: svint16_t) -> svint16_t;
+
+    #[link_name = "llvm.aarch64.sve.sel.nxv4i32"]
+    fn __llvm_sve_sel_nxv4i32(mask: svbool_t, a: svint32_t, b: svint32_t) -> svint32_t;
+
+    #[link_name = "llvm.aarch64.sve.sel.nxv2i64"]
+    fn __llvm_sve_sel_nxv2i64(mask: svbool_t, a: svint64_t, b: svint64_t) -> svint64_t;
+
+    #[link_name = "llvm.aarch64.sve.sel.nxv4f32"]
+    fn __llvm_sve_sel_nxv4f32(mask: svbool_t, a: svfloat32_t, b: svfloat32_t) -> svfloat32_t;
+
+    #[link_name = "llvm.aarch64.sve.sel.nxv2f64"]
+    fn __llvm_sve_sel_nxv2f64(mask: svbool_t, a: svfloat64_t, b: svfloat64_t) -> svfloat64_t;
+
+    #[link_name = "llvm.aarch64.sve.sel.nxv16i1"]
+    fn __llvm_sve_sel_nxv16i1(mask: svbool_t, a: svbool_t, b: svbool_t) -> svbool_t;
+}
+
+// Implement the trait for each type, calling the matching LLVM intrinsic.
+// Note: svint8_t and svuint8_t share the same LLVM intrinsic (both are nxv16i8);
+// since they are the same type at the LLVM level, a transmute between them is fine.
+impl __SveSelect for svint8_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        __llvm_sve_sel_nxv16i8(mask, a, b)
+    }
+}
+
+impl __SveSelect for svuint8_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        // svuint8_t and svint8_t are the same type at the LLVM level (both nxv16i8)
+        core::mem::transmute(__llvm_sve_sel_nxv16i8(mask, core::mem::transmute(a), core::mem::transmute(b)))
+    }
+}
+
+impl __SveSelect for svint16_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        __llvm_sve_sel_nxv8i16(mask, a, b)
+    }
+}
+
+impl __SveSelect for svuint16_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        core::mem::transmute(__llvm_sve_sel_nxv8i16(mask, core::mem::transmute(a), core::mem::transmute(b)))
+    }
+}
+
+impl __SveSelect for svint32_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        __llvm_sve_sel_nxv4i32(mask, a, b)
+    }
+}
+
+impl __SveSelect for svuint32_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        core::mem::transmute(__llvm_sve_sel_nxv4i32(mask, core::mem::transmute(a), core::mem::transmute(b)))
+    }
+}
+
+impl __SveSelect for svint64_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        __llvm_sve_sel_nxv2i64(mask, a, b)
+    }
+}
+
+impl __SveSelect for svuint64_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        core::mem::transmute(__llvm_sve_sel_nxv2i64(mask, core::mem::transmute(a), core::mem::transmute(b)))
+    }
+}
+
+impl __SveSelect for svfloat32_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        __llvm_sve_sel_nxv4f32(mask, a, b)
+    }
+}
+
+impl __SveSelect for svfloat64_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        __llvm_sve_sel_nxv2f64(mask, a, b)
+    }
+}
+
+// svbool_t is the 1-bit predicate vector, corresponding to nxv16i1
+impl __SveSelect for svbool_t {
+    #[inline(always)]
+    unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
+        __llvm_sve_sel_nxv16i1(mask, a, b)
+    }
+}
+
+// If types.rs gains f16 / bf16 / mfloat8 support, uncomment/extend as needed:
+// impl_sve_select!("nxv8f16", svfloat16_t);
+// impl_sve_select!("nxv8bf16", svbfloat16_t);
+// impl_sve_select!("nxv16f8", svmfloat8_t);
+
+// Conversions from the other predicate widths to svbool_t
+impl From<svbool2_t> for svbool_t {
+    #[inline(always)]
+    fn from(x: svbool2_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+impl From<svbool4_t> for svbool_t {
+    #[inline(always)]
+    fn from(x: svbool4_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+impl From<svbool8_t> for svbool_t {
+    #[inline(always)]
+    fn from(x: svbool8_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+// Public entry point for "select": keeps the original signature (called from sve/*.rs).
+// It no longer goes through simd_select; instead it statically dispatches via the
+// trait to the LLVM SVE `sel` intrinsic.
 #[inline(always)]
-pub(crate) unsafe fn simd_select<M, T>(m: M, a: T, b: T) -> T {
-    crate::intrinsics::simd::simd_select(m, a, b)
+pub(crate) unsafe fn simd_select<M, T>(m: M, a: T, b: T) -> T
+where
+    // SVE predicates are normalised to svbool_t, avoiding pseudo-types such as svbool4_t/svbool8_t
+    M: Into<svbool_t>,
+    T: __SveSelect,
+{
+    let mask: svbool_t = m.into();
+    <T as __SveSelect>::sel(mask, a, b)
 }
 
-// Scalar conversion trait (for type conversions in generated code)
+// -------- The original scalar-conversion trait impls below are unchanged --------
 trait ScalarConversion: Sized {
     type Unsigned;
     type Signed;
@@ -29,7 +189,6 @@ trait ScalarConversion: Sized {
     fn as_signed(self) -> Self::Signed;
 }
 
-// Implementations for the primitive integer types
 impl ScalarConversion for i8 {
     type Unsigned = u8;
     type Signed = i8;
@@ -38,7 +197,6 @@ impl ScalarConversion for i8 {
     #[inline(always)]
     fn as_signed(self) -> i8 { self }
 }
-
 impl ScalarConversion for u8 {
     type Unsigned = u8;
     type Signed = i8;
@@ -47,7 +205,6 @@ impl ScalarConversion for u8 {
     #[inline(always)]
     fn as_signed(self) -> i8 { self as i8 }
 }
-
 impl ScalarConversion for i16 {
     type Unsigned = u16;
     type Signed = i16;
@@ -56,7 +213,6 @@ impl ScalarConversion for i16 {
     #[inline(always)]
     fn as_signed(self) -> i16 { self }
 }
-
 impl ScalarConversion for u16 {
     type Unsigned = u16;
     type Signed = i16;
@@ -65,7 +221,6 @@ impl ScalarConversion for u16 {
     #[inline(always)]
     fn as_signed(self) -> i16 { self as i16 }
 }
-
 impl ScalarConversion for i32 {
     type Unsigned = u32;
     type Signed = i32;
@@ -74,7 +229,6 @@ impl ScalarConversion for i32 {
     #[inline(always)]
     fn as_signed(self) -> i32 { self }
 }
-
 impl ScalarConversion for u32 {
     type Unsigned = u32;
     type Signed = i32;
@@ -83,7 +237,6 @@ impl ScalarConversion for u32 {
     #[inline(always)]
     fn as_signed(self) -> i32 { self as i32 }
 }
-
 impl ScalarConversion for i64 {
     type Unsigned = u64;
     type Signed = i64;
@@ -92,7 +245,6 @@ impl ScalarConversion for i64 {
     #[inline(always)]
     fn as_signed(self) -> i64 { self }
 }
-
 impl ScalarConversion for u64 {
     type Unsigned = u64;
     type Signed = i64;
@@ -102,7 +254,7 @@ impl ScalarConversion for u64 {
     fn as_signed(self) -> i64 { self as i64 }
 }
 
-// Pointer implementations - separate impls for signed and unsigned pointers
+// Pointer implementations
 macro_rules! impl_scalar_conversion_for_ptr {
     ($(($unsigned:ty, $signed:ty)),*) => {$(
         impl ScalarConversion for *const $unsigned {
@@ -113,7 +265,6 @@ macro_rules! impl_scalar_conversion_for_ptr {
             #[inline(always)]
             fn as_signed(self) -> *const $signed { self as *const $signed }
         }
-
         impl ScalarConversion for *const $signed {
             type Unsigned = *const $unsigned;
             type Signed = *const $signed;
@@ -122,7 +273,6 @@ macro_rules! impl_scalar_conversion_for_ptr {
             #[inline(always)]
             fn as_signed(self) -> *const $signed { self }
         }
-
         impl ScalarConversion for *mut $unsigned {
             type Unsigned = *mut $unsigned;
             type Signed = *mut $signed;
@@ -131,7 +281,6 @@ macro_rules! impl_scalar_conversion_for_ptr {
             #[inline(always)]
             fn as_signed(self) -> *mut $signed { self as *mut $signed }
         }
-
         impl ScalarConversion for *mut $signed {
             type Unsigned = *mut $unsigned;
             type Signed = *mut $signed;
@@ -142,13 +291,32 @@ macro_rules! impl_scalar_conversion_for_ptr {
             #[inline(always)]
             fn as_signed(self) -> *mut $signed { self }
         }
     )*};
 }
-
 impl_scalar_conversion_for_ptr!((u8, i8), (u16, i16), (u32, i32), (u64, i64));
 
-// Re-export all types and functions
+// Keep the public re-exports
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
 pub use sve::*;
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
 pub use sve2::*;
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
 pub use types::*;
+
+// a) external intrinsics
+unsafe extern "C" {
+    #[link_name = "llvm.aarch64.sve.cntw"]
+    fn __llvm_sve_cntw() -> i32;
+
+    #[link_name = "llvm.aarch64.sve.whilelt"]
+    fn __llvm_sve_whilelt_i32(i: i32, n: i32) -> svbool_t;
+}
+
+// b) public API
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svcntw() -> i32 { __llvm_sve_cntw() }
+
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svwhilelt_b32(i: i32, n: i32) -> svbool_t {
+    __llvm_sve_whilelt_i32(i, n)
+}
diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs
index c40443cbe4c83..4bfcca4a3ee76 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs
@@ -1,5 +1,8 @@
 #![allow(non_camel_case_types)]
 
+// Import simd_cast from the parent module
+use super::simd_cast;
+
 // ============================================================================
 // Core SVE type definitions - minimal version for compile testing
 // ============================================================================
@@ -260,11 +263,7 @@ pub struct svprfop(u8);
 // ============================================================================
 // Type-conversion helpers (internal use only)
 // ============================================================================
-
-#[inline]
-pub(crate) unsafe fn simd_cast<T, U>(x: T) -> U {
-    crate::intrinsics::simd::simd_cast(x)
-}
+// Note: simd_cast is defined in the parent module mod.rs and uses transmute_copy to avoid error E0511

 // ============================================================================
 // From-like conversion methods - suitable for cross-compilation
diff --git a/tests/codegen-llvm/scalable-vectors/simple.rs b/tests/codegen-llvm/scalable-vectors/simple.rs
index 76965d0331138..cc9ac2e05cf3a 100644
--- a/tests/codegen-llvm/scalable-vectors/simple.rs
+++ b/tests/codegen-llvm/scalable-vectors/simple.rs
@@ -5,38 +5,21 @@
 #![feature(simd_ffi, rustc_attrs, link_llvm_intrinsics, stdarch_aarch64_sve)]
 
 // ============================================================================
-// Demonstrate the various ways to import the SVE types
+// Demonstrate how to import the SVE types and functions
 // ============================================================================
 
-// Option 1: import all types directly from the aarch64 module (recommended; the types are re-exported)
-use std::arch::aarch64::{svint32_t, svint64_t, svfloat32_t, svuint32_t};
+// Import the SVE types from the aarch64 module (the types are re-exported)
+use std::arch::aarch64::{svint32_t, svint64_t, svfloat32_t, svuint32_t, svpattern};
 
-// Option 1b: import all types through the sve module (requires the sve module to be publicly exported)
-// use std::arch::aarch64::sve::types::*;
-
-// Option 2: import the types module and use module paths
-// use std::arch::aarch64::sve::types;
-// e.g. types::svint32_t, types::svint64_t, ...
-
-// Option 3: import specific types from the types module
-// use std::arch::aarch64::sve::types::{svint32_t, svint64_t, svfloat32_t};
-
-// Option 4: import the types directly (the original approach; still works)
-// use std::arch::aarch64::{svint32_t, svint64_t, svfloat32_t};
-
-// Option 5: import through the aarch64 module (the sve module is re-exported)
-// use std::arch::aarch64::types::*;
-
+// Import the SVE intrinsic functions from the aarch64 module (the functions are re-exported)
+use std::arch::aarch64::{
+    svdup_n_s32, svdup_n_s64, svdup_n_f32, svdup_n_u32,
+    svadd_s32_z, svsub_s32_z, svmul_s32_z,
+    svptrue_pat_b32,
+};
 
+// Note: svxar_n_s32 is an SVE2 function; if the library does not define it, keep
+// the local definition here or substitute another function that is already defined.
 #[inline]
 #[target_feature(enable = "sve,sve2")]
 pub unsafe fn svxar_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
@@ -47,6 +30,10 @@ pub unsafe fn svxar_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> s
     unsafe { _svxar_n_s32(op1, op2, IMM3) }
 }
 
+// ============================================================================
+// Test cases: use the SVE intrinsic functions defined in the library
+// ============================================================================
+
 #[inline(never)]
 #[no_mangle]
 #[target_feature(enable = "sve,sve2")]
@@ -57,9 +44,10 @@ pub unsafe fn pass_as_ref(a: &svint32_t, b: svint32_t) -> svint32_t {
 }
 
 #[no_mangle]
-#[target_feature(enable = "sve,sve2")]
+#[target_feature(enable = "sve")]
 // CHECK: define <vscale x 4 x i32> @test()
 pub unsafe fn test() -> svint32_t {
+    // Use the library-defined svdup_n_s32
     let a = svdup_n_s32(1);
     let b = svdup_n_s32(2);
     // CHECK: %_0 = call <vscale x 4 x i32> @pass_as_ref(ptr noalias noundef nonnull readonly align 16 dereferenceable(16) %a, <vscale x 4 x i32> %b)
     pass_as_ref(&a, b)
 }
 
 // ============================================================================
-// Examples using different types (showing the convenience of importing the types module)
+// Examples using different types
 // ============================================================================
 
-// Example: using svint64_t
-#[inline(never)]
-#[target_feature(enable = "sve")]
-pub unsafe fn svdup_n_s64(op: i64) -> svint64_t {
-    extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")]
-        fn _svdup_n_s64(op: i64) -> svint64_t;
-    }
-    unsafe { _svdup_n_s64(op) }
-}
-
-// Example: using svfloat32_t
-#[inline(never)]
-#[target_feature(enable = "sve")]
-pub unsafe fn svdup_n_f32(op: f32) -> svfloat32_t {
-    extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")]
-        fn _svdup_n_f32(op: f32) -> svfloat32_t;
-    }
-    unsafe { _svdup_n_f32(op) }
-}
-
-// Example: mixing several types (note: scalable vector types cannot be tuple fields)
-// so we create three separate functions to demonstrate the different types
 #[no_mangle]
 #[target_feature(enable = "sve")]
 pub unsafe fn test_multiple_types_i32() -> svint32_t {
+    // Use the library-defined svdup_n_s32
     let i32_vec = svdup_n_s32(42);
     i32_vec
 }
@@ -104,6 +69,7 @@ pub unsafe fn test_multiple_types_i32() -> svint32_t {
 #[no_mangle]
 #[target_feature(enable = "sve")]
 pub unsafe fn test_multiple_types_i64() -> svint64_t {
+    // Use the library-defined svdup_n_s64
     let i64_vec = svdup_n_s64(100);
     i64_vec
 }
@@ -111,17 +77,36 @@ pub unsafe fn test_multiple_types_i64() -> svint64_t {
 #[no_mangle]
 #[target_feature(enable = "sve")]
 pub unsafe fn test_multiple_types_f32() -> svfloat32_t {
+    // Use the library-defined svdup_n_f32
     let f32_vec = svdup_n_f32(3.14);
     f32_vec
 }
 
-// Example: using svuint32_t (demonstrates an unsigned type)
-#[inline(never)]
+#[no_mangle]
 #[target_feature(enable = "sve")]
-pub unsafe fn svdup_n_u32(op: u32) -> svuint32_t {
-    extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
-        fn _svdup_n_u32(op: u32) -> svuint32_t;
-    }
-    unsafe { _svdup_n_u32(op) }
+pub unsafe fn test_multiple_types_u32() -> svuint32_t {
+    // Use the library-defined svdup_n_u32
+    let u32_vec = svdup_n_u32(200);
+    u32_vec
+}
+
+// ============================================================================
+// Demonstrate other SVE intrinsic functions
+// ============================================================================
+
+#[no_mangle]
+#[target_feature(enable = "sve")]
+pub unsafe fn test_arithmetic_operations() -> svint32_t {
+    // Use library-defined functions for arithmetic
+    let a 
= svdup_n_s32(10);
+    let b = svdup_n_s32(20);
+    // Build an all-true predicate (using the SV_ALL pattern)
+    const PATTERN_ALL: svpattern = svpattern::SV_ALL;
+    let pg = svptrue_pat_b32::<PATTERN_ALL>();
+    // Addition
+    let sum = svadd_s32_z(pg, a, b);
+    // Subtraction
+    let diff = svsub_s32_z(pg, b, a);
+    // Multiplication
+    svmul_s32_z(pg, sum, diff)
 }

From 82f8b65b934bbadeac4663e4980deb6423241215 Mon Sep 17 00:00:00 2001
From: wxh
Date: Wed, 12 Nov 2025 14:42:55 +0800
Subject: [PATCH 13/27] Add division functions to sve.rs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../crates/core_arch/src/aarch64/sve/sve.rs   | 64 +++++++++++++++++++
 1 file changed, 64 insertions(+)

diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs
index 4622fee0e6e13..07d7125210b84 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs
@@ -3105,6 +3105,70 @@ pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
 pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
     svmul_s32_z(pg, op1, svdup_n_s32(op2))
 }
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")]
+        fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svdiv_s32_m(simd_cast(pg), op1, op2) }
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdiv_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svdiv_s32_m(pg, op1, op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdiv_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Divide"]
"Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +} #[doc = "Multiply"] #[doc = ""] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] From 271aa7b238d4c798baa8c04f54bc78cd63d4acb0 Mon Sep 17 00:00:00 2001 From: wxh Date: Thu, 13 Nov 2025 17:08:12 +0800 Subject: [PATCH 14/27] =?UTF-8?q?=E5=AF=B9SVE=E7=B1=BB=E5=9E=8B=E8=BF=9B?= =?UTF-8?q?=E8=A1=8C=E4=BA=86=E9=80=82=E5=BD=93=E8=A1=A5=E5=85=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/types.rs | 1274 ++++++++++++++--- 1 file changed, 1109 insertions(+), 165 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs index 4bfcca4a3ee76..74515b1922573 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs @@ -206,193 +206,1137 @@ impl Clone for svfloat16_t { fn clone(&self) -> Self { *self } } +// ============================================================================ +// SVE 向量元组类型定义 +// ============================================================================ + +/// SVE 8位有符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(32)] +pub struct svint8x2_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 8位无符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(32)] +pub struct svuint8x2_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位有符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svint16x2_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位无符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svuint16x2_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位浮点双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svfloat32x2_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 
32位有符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svint32x2_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位无符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svuint32x2_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位浮点双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(4)] +pub struct svfloat64x2_t(f64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat64x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat64x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位有符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(4)] +pub struct svint64x2_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位无符号整数双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(4)] +pub struct svuint64x2_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 8位有符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(48)] +pub struct svint8x3_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 8位无符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(48)] +pub struct svuint8x3_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位有符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(24)] +pub struct svint16x3_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位无符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(24)] +pub struct svuint16x3_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位浮点三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = 
"none")] +#[repr(C)] +#[rustc_scalable_vector(12)] +pub struct svfloat32x3_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位有符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(12)] +pub struct svint32x3_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位无符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(12)] +pub struct svuint32x3_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位浮点三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(6)] +pub struct svfloat64x3_t(f64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat64x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat64x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位有符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(6)] +pub struct svint64x3_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位无符号整数三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(6)] +pub struct svuint64x3_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 8位有符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(64)] +pub struct svint8x4_t(i8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint8x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint8x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 8位无符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(64)] +pub struct svuint8x4_t(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint8x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint8x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位有符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(32)] +pub struct svint16x4_t(i16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint16x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint16x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位无符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(32)] +pub struct 
svuint16x4_t(u16); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint16x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint16x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位浮点四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svfloat32x4_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位有符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svint32x4_t(i32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint32x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint32x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 32位无符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svuint32x4_t(u32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint32x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint32x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位浮点四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svfloat64x4_t(f64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat64x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat64x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位有符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svint64x4_t(i64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svint64x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svint64x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 64位无符号整数四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(8)] +pub struct svuint64x4_t(u64); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svuint64x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svuint64x4_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位浮点双向量 (x2) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(16)] +pub struct svfloat16x2_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x2_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x2_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位浮点三向量 (x3) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(24)] +pub struct svfloat16x3_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x3_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x3_t { + fn clone(&self) -> Self { *self } +} + +/// SVE 16位浮点四向量 (x4) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[rustc_scalable_vector(32)] +pub struct svfloat16x4_t(f32); + +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x4_t {} +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x4_t { + fn clone(&self) -> Self { *self } +} + // ============================================================================ // SVE 辅助类型 // ============================================================================ -/// SVE模式类型 +/// SVE模式类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)] +pub struct svpattern(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svpattern { + /// 从原始字节创建模式值 + #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn from_raw(value: u8) -> Self { + svpattern(value) + } + + /// 以原始字节形式返回模式值 + #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn as_raw(self) -> u8 { + self.0 + } + + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_ALL: svpattern = svpattern(31); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL1: svpattern = svpattern(1); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL2: svpattern = svpattern(2); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL3: svpattern = svpattern(3); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL4: svpattern = svpattern(4); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL5: svpattern = svpattern(5); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL6: svpattern = svpattern(6); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL7: svpattern = svpattern(7); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL8: svpattern = svpattern(8); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL16: svpattern = svpattern(9); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL32: svpattern = svpattern(10); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL64: svpattern = svpattern(11); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL128: svpattern = svpattern(12); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_VL256: svpattern = svpattern(13); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_POW2: svpattern = svpattern(30); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_MUL4: svpattern = svpattern(29); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_MUL3: svpattern = svpattern(28); +} + +/// SVE预取操作类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[repr(C)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)] +pub struct svprfop(u8); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svprfop { + /// 从原始字节创建预取操作值 + #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn from_raw(value: u8) -> Self { + svprfop(value) + } + + /// 以原始字节形式返回预取操作值 + #[inline(always)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const fn as_raw(self) -> u8 { + self.0 + } +} + +// ============================================================================ +// 
类型转换辅助函数(仅用于内部) +// ============================================================================ +// 注意:simd_cast 函数定义在父模块 mod.rs 中,使用 transmute_copy 避免 E0511 错误 + +// ============================================================================ +// 类 From trait 的转换方法 - 适用于交叉编译 +// ============================================================================ + +// 方案:使用 Associated Functions 提供类似 From::from 的 API +// 这些方法添加了 #[target_feature(enable = "sve")],可以在交叉编译时正常工作 + +/// svbool_t 的转换方法 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool_t { + /// 转换为 svbool2_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool2(self) -> svbool2_t { + simd_cast(self) + } + + /// 转换为 svbool4_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool4(self) -> svbool4_t { + simd_cast(self) + } + + /// 转换为 svbool8_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool8(self) -> svbool8_t { + simd_cast(self) + } +} + +/// svbool2_t 的转换方法 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool2_t { + /// 从 svbool_t 创建(类似 From::from) + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool(x: svbool_t) -> Self { + simd_cast(x) + } + + /// 转换为 svbool_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool(self) -> svbool_t { + simd_cast(self) + } + + /// 转换为 svbool4_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool4(self) -> svbool4_t { + simd_cast(self) + } + + /// 转换为 svbool8_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool8(self) -> svbool8_t { + simd_cast(self) + } + + /// 从 svbool4_t 创建 + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool4(x: svbool4_t) -> Self { + simd_cast(x) + } + + /// 从 svbool8_t 创建 + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool8(x: svbool8_t) -> Self { + simd_cast(x) + } +} + +/// svbool4_t 的转换方法 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool4_t { + /// 从 svbool_t 创建(类似 From::from) + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool(x: svbool_t) -> Self { + simd_cast(x) + } + + /// 转换为 svbool_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool(self) -> svbool_t { + simd_cast(self) + } + + /// 转换为 svbool2_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool2(self) -> svbool2_t { + simd_cast(self) + } + + /// 转换为 svbool8_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool8(self) -> svbool8_t { + simd_cast(self) + } + + /// 从 svbool2_t 创建 + #[inline] + #[target_feature(enable = "sve")] + 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool2(x: svbool2_t) -> Self { + simd_cast(x) + } + + /// 从 svbool8_t 创建 + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool8(x: svbool8_t) -> Self { + simd_cast(x) + } +} + +/// svbool8_t 的转换方法 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl svbool8_t { + /// 从 svbool_t 创建(类似 From::from) + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool(x: svbool_t) -> Self { + simd_cast(x) + } + + /// 转换为 svbool_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool(self) -> svbool_t { + simd_cast(self) + } + + /// 转换为 svbool2_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool2(self) -> svbool2_t { + simd_cast(self) + } + + /// 转换为 svbool4_t + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn into_svbool4(self) -> svbool4_t { + simd_cast(self) + } + + /// 从 svbool2_t 创建 + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool2(x: svbool2_t) -> Self { + simd_cast(x) + } + + /// 从 svbool4_t 创建 + #[inline] + #[target_feature(enable = "sve")] + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub unsafe fn from_svbool4(x: svbool4_t) -> Self { + simd_cast(x) + } +} + +// ============================================================================ +// 类型转换 Trait - 用于生成的代码 +// ============================================================================ + +/// 转换为无符号向量类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub trait AsUnsigned { + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + type Unsigned; + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + fn as_unsigned(self) -> Self::Unsigned; +} + +/// 转换为有符号向量类型 +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub trait AsSigned { + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + type Signed; + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + fn as_signed(self) -> Self::Signed; +} + +// 为所有 SVE 整数类型实现转换 trait +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint8_t { + type Unsigned = svuint8_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint8_t { + type Signed = svint8_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint8_t { + type Unsigned = svuint8_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint8_t { + type Signed = svint8_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint16_t { + type Unsigned = svuint16_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { self } +} + +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint16_t { + type Signed = svint16_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint16_t { + type Unsigned = svuint16_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint16_t { + type Signed = svint16_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint32_t { + type Unsigned = svuint32_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint32_t { + type Signed = svint32_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint32_t { + type Unsigned = svuint32_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint32_t { + type Signed = svint32_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint64_t { + type Unsigned = svuint64_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint64_t { + type Signed = svint64_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint64_t { + type Unsigned = svuint64_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint64_t { + type Signed = svint64_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint8x2_t { + type Unsigned = svuint8x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint8x2_t { + type Signed = svint8x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svint8x2_t { + type Unsigned = svuint8x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { + unsafe { simd_cast(self) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint8x2_t { + type Signed = svint8x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint16x2_t { + type Unsigned = svuint16x2_t; + #[inline(always)] + fn as_unsigned(self) -> Self::Unsigned { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svuint16x2_t { + type Signed = svint16x2_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { + unsafe { simd_cast(self) } + } +} 
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint16x2_t {
+    type Unsigned = svuint16x2_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint16x2_t {
+    type Signed = svint16x2_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svuint32x2_t {
+    type Unsigned = svuint32x2_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svuint32x2_t {
+    type Signed = svint32x2_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint32x2_t {
+    type Unsigned = svuint32x2_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
+
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[repr(C)]
-#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)]
-pub struct svpattern(u8);
+impl AsSigned for svint32x2_t {
+    type Signed = svint32x2_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
 
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl svpattern {
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_ALL: svpattern = svpattern(31);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL1: svpattern = svpattern(1);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL2: svpattern = svpattern(2);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL3: svpattern = svpattern(3);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL4: svpattern = svpattern(4);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL5: svpattern = svpattern(5);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL6: svpattern = svpattern(6);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL7: svpattern = svpattern(7);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL8: svpattern = svpattern(8);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL16: svpattern = svpattern(9);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL32: svpattern = svpattern(10);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL64: svpattern = svpattern(11);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL128: svpattern = svpattern(12);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_VL256: svpattern = svpattern(13);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_POW2: svpattern = svpattern(30);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_MUL4: svpattern = svpattern(29);
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub const SV_MUL3: svpattern = svpattern(28);
+impl AsUnsigned for svuint64x2_t {
+    type Unsigned = svuint64x2_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
 }
 
-/// SVE prefetch operation type
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[repr(C)]
-#[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)]
-pub struct svprfop(u8);
+impl AsSigned for svuint64x2_t {
+    type Signed = svint64x2_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
+    }
+}
 
-// ============================================================================
-// Type conversion helper functions (internal use only)
-// ============================================================================
-// Note: the simd_cast function is defined in the parent module mod.rs and uses transmute_copy to avoid error E0511
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint64x2_t {
+    type Unsigned = svuint64x2_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
 
-// ============================================================================
-// From-trait-style conversion methods - suitable for cross-compilation
-// ============================================================================
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint64x2_t {
+    type Signed = svint64x2_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
 
-// Approach: use associated functions to provide a From::from-like API
-// These methods carry #[target_feature(enable = "sve")], so they work correctly when cross-compiling
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svuint8x3_t {
+    type Unsigned = svuint8x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
 
-/// Conversion methods for svbool_t
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl svbool_t {
-    /// Convert to svbool2_t
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn into_svbool2(self) -> svbool2_t {
-        simd_cast(self)
+impl AsSigned for svuint8x3_t {
+    type Signed = svint8x3_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
     }
+}
 
-    /// Convert to svbool4_t
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn into_svbool4(self) -> svbool4_t {
-        simd_cast(self)
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint8x3_t {
+    type Unsigned = svuint8x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
     }
+}
 
-    /// Convert to svbool8_t
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn into_svbool8(self) -> svbool8_t {
-        simd_cast(self)
-    }
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint8x3_t {
+    type Signed = svint8x3_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
 }
 
-/// Conversion methods for svbool2_t
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl svbool2_t {
-    /// Create from svbool_t (like From::from)
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn from_svbool(x: svbool_t) -> Self {
-        simd_cast(x)
-    }
+impl AsUnsigned for svuint16x3_t {
+    type Unsigned = svuint16x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
 
-    /// Convert to svbool_t
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn into_svbool(self) -> svbool_t {
-        simd_cast(self)
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svuint16x3_t {
+    type Signed = svint16x3_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
     }
 }
 
-/// Conversion methods for svbool4_t
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl svbool4_t {
-    /// Create from svbool_t (like From::from)
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn from_svbool(x: svbool_t) -> Self {
-        simd_cast(x)
+impl AsUnsigned for svint16x3_t {
+    type Unsigned = svuint16x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
     }
+}
 
-    /// Convert to svbool_t
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn into_svbool(self) -> svbool_t {
-        simd_cast(self)
-    }
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint16x3_t {
+    type Signed = svint16x3_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
 }
 
-/// Conversion methods for svbool8_t
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl svbool8_t {
-    /// Create from svbool_t (like From::from)
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn from_svbool(x: svbool_t) -> Self {
-        simd_cast(x)
+impl AsUnsigned for svuint32x3_t {
+    type Unsigned = svuint32x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svuint32x3_t {
+    type Signed = svint32x3_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
     }
+}
 
-    /// Convert to svbool_t
-    #[inline]
-    #[target_feature(enable = "sve")]
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    pub unsafe fn into_svbool(self) -> svbool_t {
-        simd_cast(self)
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsUnsigned for svint32x3_t {
+    type Unsigned = svuint32x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
     }
 }
 
-// ============================================================================
-// Type conversion traits - used by the generated code
-// ============================================================================
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl AsSigned for svint32x3_t {
+    type Signed = svint32x3_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed { self }
+}
 
-/// Convert to the unsigned vector type
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-pub trait AsUnsigned {
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    type Unsigned;
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    fn as_unsigned(self) -> Self::Unsigned;
+impl AsUnsigned for svuint64x3_t {
+    type Unsigned = svuint64x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned { self }
 }
 
-/// Convert to the signed vector type
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-pub trait AsSigned {
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    type Signed;
-    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-    fn as_signed(self) -> Self::Signed;
+impl AsSigned for svuint64x3_t {
+    type Signed = svint64x3_t;
+    #[inline(always)]
+    fn as_signed(self) -> Self::Signed {
+        unsafe { simd_cast(self) }
+    }
 }
 
-// Implement the conversion traits for all SVE integer types
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl AsUnsigned for svuint8_t {
-    type Unsigned = svuint8_t;
+impl AsUnsigned for svint64x3_t {
+    type Unsigned = svuint64x3_t;
+    #[inline(always)]
+    fn as_unsigned(self) -> Self::Unsigned {
+        unsafe { simd_cast(self) }
+    }
+}
+
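+// The simd_cast helper used throughout these impls is the one described earlier: it is
+// defined in the parent module mod.rs on top of transmute_copy to avoid error E0511.
+// A minimal sketch of such a helper, as an assumption for illustration rather than the
+// actual definition in mod.rs, could be:
+//
+//     #[inline(always)]
+//     unsafe fn simd_cast<T, U>(x: T) -> U {
+//         core::mem::transmute_copy(&x)
+//     }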
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsSigned for svint64x3_t { + type Signed = svint64x3_t; + #[inline(always)] + fn as_signed(self) -> Self::Signed { self } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl AsUnsigned for svuint8x4_t { + type Unsigned = svuint8x4_t; #[inline(always)] fn as_unsigned(self) -> Self::Unsigned { self } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsSigned for svuint8_t { - type Signed = svint8_t; +impl AsSigned for svuint8x4_t { + type Signed = svint8x4_t; #[inline(always)] fn as_signed(self) -> Self::Signed { unsafe { simd_cast(self) } @@ -400,8 +1344,8 @@ impl AsSigned for svuint8_t { } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsUnsigned for svint8_t { - type Unsigned = svuint8_t; +impl AsUnsigned for svint8x4_t { + type Unsigned = svuint8x4_t; #[inline(always)] fn as_unsigned(self) -> Self::Unsigned { unsafe { simd_cast(self) } @@ -409,22 +1353,22 @@ impl AsUnsigned for svint8_t { } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsSigned for svint8_t { - type Signed = svint8_t; +impl AsSigned for svint8x4_t { + type Signed = svint8x4_t; #[inline(always)] fn as_signed(self) -> Self::Signed { self } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsUnsigned for svuint16_t { - type Unsigned = svuint16_t; +impl AsUnsigned for svuint16x4_t { + type Unsigned = svuint16x4_t; #[inline(always)] fn as_unsigned(self) -> Self::Unsigned { self } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsSigned for svuint16_t { - type Signed = svint16_t; +impl AsSigned for svuint16x4_t { + type Signed = svint16x4_t; #[inline(always)] fn as_signed(self) -> Self::Signed { unsafe { simd_cast(self) } @@ -432,8 +1376,8 @@ impl AsSigned for svuint16_t { } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsUnsigned for svint16_t { - type Unsigned = svuint16_t; +impl AsUnsigned for svint16x4_t { + type Unsigned = svuint16x4_t; #[inline(always)] fn as_unsigned(self) -> Self::Unsigned { unsafe { simd_cast(self) } @@ -441,22 +1385,22 @@ impl AsUnsigned for svint16_t { } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsSigned for svint16_t { - type Signed = svint16_t; +impl AsSigned for svint16x4_t { + type Signed = svint16x4_t; #[inline(always)] fn as_signed(self) -> Self::Signed { self } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsUnsigned for svuint32_t { - type Unsigned = svuint32_t; +impl AsUnsigned for svuint32x4_t { + type Unsigned = svuint32x4_t; #[inline(always)] fn as_unsigned(self) -> Self::Unsigned { self } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsSigned for svuint32_t { - type Signed = svint32_t; +impl AsSigned for svuint32x4_t { + type Signed = svint32x4_t; #[inline(always)] fn as_signed(self) -> Self::Signed { unsafe { simd_cast(self) } @@ -464,8 +1408,8 @@ impl AsSigned for svuint32_t { } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsUnsigned for svint32_t { - type Unsigned = svuint32_t; +impl AsUnsigned for svint32x4_t { + type Unsigned = svuint32x4_t; #[inline(always)] fn as_unsigned(self) -> Self::Unsigned { unsafe { simd_cast(self) } @@ -473,22 +1417,22 @@ impl AsUnsigned for svint32_t { } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl AsSigned for svint32_t { - type Signed = svint32_t; +impl AsSigned for svint32x4_t { + type Signed = svint32x4_t; #[inline(always)] fn 
as_signed(self) -> Self::Signed { self }
 }
 
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl AsUnsigned for svuint64_t {
-    type Unsigned = svuint64_t;
+impl AsUnsigned for svuint64x4_t {
+    type Unsigned = svuint64x4_t;
     #[inline(always)]
     fn as_unsigned(self) -> Self::Unsigned { self }
 }
 
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl AsSigned for svuint64_t {
-    type Signed = svint64_t;
+impl AsSigned for svuint64x4_t {
+    type Signed = svint64x4_t;
     #[inline(always)]
     fn as_signed(self) -> Self::Signed {
         unsafe { simd_cast(self) }
@@ -496,8 +1440,8 @@ impl AsSigned for svuint64_t {
 }
 
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl AsUnsigned for svint64_t {
-    type Unsigned = svuint64_t;
+impl AsUnsigned for svint64x4_t {
+    type Unsigned = svuint64x4_t;
     #[inline(always)]
     fn as_unsigned(self) -> Self::Unsigned {
         unsafe { simd_cast(self) }
@@ -505,8 +1449,8 @@ impl AsUnsigned for svint64_t {
 }
 
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-impl AsSigned for svint64_t {
-    type Signed = svint64_t;
+impl AsSigned for svint64x4_t {
+    type Signed = svint64x4_t;
     #[inline(always)]
     fn as_signed(self) -> Self::Signed { self }
 }

From bdceb6170c6ac6897c4787e52ffc9647dd704296 Mon Sep 17 00:00:00 2001
From: wxh
Date: Fri, 14 Nov 2025 12:01:45 +0800
Subject: [PATCH 15/27] Third batch of intrinsics filled in; cumulative
 coverage so far: (`svadd/svsub/svmul`, `svabd`, `svabs`, `svcnot`),
 (`svcmp*`, `svc*`), (`svaddv`, `svcnt*`, `svclz/cls`)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../crates/core_arch/src/aarch64/sve/sve.rs   | 5301 ++++++++++++-----
 1 file changed, 3747 insertions(+), 1554 deletions(-)

diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs
index 07d7125210b84..7f8075451f940 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs
@@ -2047,2879 +2047,5072 @@ pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t
 pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
     svcmplt_f64(pg, op1, svdup_n_f64(op2))
 }
-#[doc = "Compare less than"]
+
+#[doc = "Compare not equal to"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(cmpgt))]
-pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
-    svcmpgt_s8(pg, op2, op1)
+#[cfg_attr(test, assert_instr(fcmne))]
+pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")]
+        fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { simd_cast(_svcmpne_f32(simd_cast(pg), op1, op2)) }
 }
-#[doc = "Compare less than"]
+#[doc = "Compare not equal to"]
 #[doc = ""]
-#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmplt_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpne_f32(pg, op1, svdup_n_f32(op2)) } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - svcmpgt_s16(pg, op2, op1) +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] + fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpne_f64(simd_cast(pg), op1, op2)) } } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmplt_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpne_f64(pg, op1, svdup_n_f64(op2)) } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - svcmpgt_s32(pg, op2, op1) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] + fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpne_s8(pg, op1, op2) } } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] #[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmplt_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpne_s8(pg, op1, svdup_n_s8(op2)) } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - svcmpgt_s64(pg, op2, op1) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] + fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { simd_cast(_svcmpne_s16(simd_cast(pg), op1, op2)) } } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmplt_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpne_s16(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - svcmpgt_u8(pg, op2, op1) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] + fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpne_s32(simd_cast(pg), op1, op2)) } } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - 
svcmplt_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpne_s32(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - svcmpgt_u16(pg, op2, op1) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] + fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpne_s64(simd_cast(pg), op1, op2)) } } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmplt_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpne_s64(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - svcmpgt_u32(pg, op2, op1) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmplt_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpne_u8(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - svcmpgt_u64(pg, op2, op1) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Compare less than"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmplt_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpne_u16(pg, op1, svdup_n_u16(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" - )] - fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; - } - unsafe { _svcvt_f32_s32_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Floating-point convert"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { - unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpne_u32(pg, op1, svdup_n_u32(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_z(pg: svbool_t, 
op: svint32_t) -> svfloat32_t { - svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Floating-point convert"] +#[doc = "Compare not equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] - fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; - } - unsafe { _svcvt_f32_s64_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpne_u64(pg, op1, svdup_n_u64(op2)) } -#[doc = "Floating-point convert"] + +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { - unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")] + fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpge_f32(simd_cast(pg), op1, op2)) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { - svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpge_f32(pg, op1, svdup_n_f32(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { 
+#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" - )] - fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")] + fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; } - unsafe { _svcvt_f32_u32_m(inactive, simd_cast(pg), op.as_signed()) } + unsafe { simd_cast(_svcmpge_f64(simd_cast(pg), op1, op2)) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { - unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpge_f64(pg, op1, svdup_n_f64(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { - svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] + fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_s8(pg, op1, op2) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpge_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ucvtf.f32i64")] - fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] + fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; } - unsafe { _svcvt_f32_u64_m(inactive, simd_cast(pg), op.as_signed()) } + unsafe { simd_cast(_svcmpge_s16(simd_cast(pg), op1, op2)) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { - unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpge_s16(pg, op1, svdup_n_s16(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { - svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] + fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpge_s32(simd_cast(pg), op1, op2)) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpge_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" - )] - fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.cmpge.nxv2i64")] + fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; } - unsafe { _svcvt_f64_s32_m(inactive, simd_cast(pg), op) } + unsafe { simd_cast(_svcmpge_s64(simd_cast(pg), op1, op2)) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { - unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpge_s64(pg, op1, svdup_n_s64(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { - svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpge_s8(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" - )] - fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_s64_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpge_u8(pg, op1, svdup_n_u8(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { - unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpge_s16(pg, op1.as_signed(), 
op2.as_signed()) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { - svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpge_u16(pg, op1, svdup_n_u16(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" - )] - fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_u32_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpge_s32(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { - unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpge_u32(pg, op1, svdup_n_u32(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { - svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpge_s64(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Floating-point convert"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" - )] - fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_u64_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpge_u64(pg, op1, svdup_n_u64(op2)) } -#[doc = "Floating-point convert"] + +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { - unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv4f32")] + fn _svcmple_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmple_f32(simd_cast(pg), op1, op2)) } } -#[doc = "Floating-point convert"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { - svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmple_f32(pg, op1, svdup_n_f32(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_f32(op: f32) -> svfloat32_t { +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] - fn _svdup_n_f32(op: f32) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv2f64")] + fn _svcmple_f64(pg: svbool2_t, 
op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; } - unsafe { _svdup_n_f32(op) } + unsafe { simd_cast(_svcmple_f64(simd_cast(pg), op1, op2)) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_f64(op: f64) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] - fn _svdup_n_f64(op: f64) -> svfloat64_t; - } - unsafe { _svdup_n_f64(op) } +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmple_f64(pg, op1, svdup_n_f64(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s8(op: i8) -> svint8_t { +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] - fn _svdup_n_s8(op: i8) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv16i8")] + fn _svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; } - unsafe { _svdup_n_s8(op) } + unsafe { _svcmple_s8(pg, op1, op2) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s16(op: i16) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] - fn _svdup_n_s16(op: i16) -> svint16_t; - } - unsafe { _svdup_n_s16(op) } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmple_s8(pg, op1, svdup_n_s8(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s32(op: i32) -> svint32_t { +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name 
= "llvm.aarch64.sve.dup.x.nxv4i32")] - fn _svdup_n_s32(op: i32) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv8i16")] + fn _svcmple_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; } - unsafe { _svdup_n_s32(op) } + unsafe { simd_cast(_svcmple_s16(simd_cast(pg), op1, op2)) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s64(op: i64) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] - fn _svdup_n_s64(op: i64) -> svint64_t; - } - unsafe { _svdup_n_s64(op) } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmple_s16(pg, op1, svdup_n_s16(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u8(op: u8) -> svuint8_t { - unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv4i32")] + fn _svcmple_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmple_s32(simd_cast(pg), op1, op2)) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u16(op: u16) -> svuint16_t { - unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmple_s32(pg, op1, svdup_n_s32(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u32(op: u32) -> svuint32_t { - unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + 
unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv2i64")] + fn _svcmple_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmple_s64(simd_cast(pg), op1, op2)) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u64(op: u64) -> svuint64_t { - unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmple_s64(pg, op1, svdup_n_s64(op2)) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] - fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; - } - _svld1_f32(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmple_s8(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] - fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; - } - _svld1_f64(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmple_u8(pg, op1, svdup_n_u8(op2)) } -#[doc = "Unextended load"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] -#[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] - fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; - } - _svld1_s8(pg, base) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmple_s16(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] - fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; - } - _svld1_s16(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmple_u16(pg, op1, svdup_n_u16(op2)) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] - fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; - } - _svld1_s32(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmple_s32(pg, op1.as_signed(), op2.as_signed()) } } 
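// A minimal usage sketch, not part of the generated bindings in this patch: it shows how
// the comparison forms above compose. The `_n` variants broadcast their scalar operand
// with `svdup_n_*`, and the unsigned variants are routed through the signed LLVM
// intrinsics via `as_signed`/`as_unsigned`. The all-true predicate `svptrue_b8` and the
// helper name `lanes_at_most` are assumptions for illustration; `svptrue_b8` does not
// appear in this hunk.
#[target_feature(enable = "sve")]
fn lanes_at_most(data: svuint8_t, limit: u8) -> svbool_t {
    // Assumed helper: an all-true predicate over 8-bit lanes.
    let pg = svptrue_b8();
    // Equivalent to svcmple_u8(pg, data, svdup_n_u8(limit)).
    svcmple_n_u8(pg, data, limit)
}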
-#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] - fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; - } - _svld1_s64(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmple_u32(pg, op1, svdup_n_u32(op2)) } -#[doc = "Unextended load"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] -#[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { - svld1_s8(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmple_s64(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { - svld1_s16(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmple_u64(pg, op1, svdup_n_u64(op2)) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " 
* [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { - svld1_s32(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpgt_s8(pg, op2, op1) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { - svld1_s64(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] - fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svmul_f32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svmul_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_x(pg, op1, svdup_n_f32(op2)) -} -#[doc = "Multiply"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmplt_s32(pg, op1, svdup_n_s32(op2)) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] - fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svmul_f64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svmul_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) } -#[doc = "Multiply"] 
+#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] - fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svmul_s8_m(pg, op1, op2) } +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) } -#[doc = "Multiply"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svmul_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, simd_cast(pg), op) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] - fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; } - unsafe { _svmul_s16_m(simd_cast(pg), op1, op2) } + unsafe { _svcvt_f32_s64_m(inactive, simd_cast(pg), op) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svmul_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, simd_cast(pg), op.as_signed()) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] - 
fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; } - unsafe { _svmul_s32_m(simd_cast(pg), op1, op2) } + unsafe { _svcvt_f32_u64_m(inactive, simd_cast(pg), op.as_signed()) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svmul_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } } -#[doc = "Multiply"] +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, simd_cast(pg), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> 
svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, simd_cast(pg), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, simd_cast(pg), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, simd_cast(pg), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn _svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} +#[doc = "Broadcast a 
scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(simd_cast(pg), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")]
+        fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
+    }
+    _svld1_f64(simd_cast(pg), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")]
+        fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
+    }
+    _svld1_s8(pg, base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")]
+        fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
+    }
+    _svld1_s16(simd_cast(pg), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")]
+        fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
+    }
+    _svld1_s32(simd_cast(pg), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(simd_cast(pg), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[inline] +#[target_feature(enable 
= "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] + fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmul_f32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] + fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmul_f64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_m(pg: 
svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmul_s8_m(pg, op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: 
svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmul_s16_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn 
svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] + fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")] + fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmul_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, 
assert_instr(mul))] +pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, 
svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svorr_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: 
svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svorr_s16_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: 
svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), 
op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, op1, op2) +} +#[doc 
= "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + } + unsafe { simd_cast(_svptrue_pat_b16(PATTERN)) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { simd_cast(_svptrue_pat_b32(PATTERN)) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn 
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")]
+        fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t;
+    }
+    unsafe { simd_cast(_svptrue_pat_b64(PATTERN)) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) }
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")]
+        fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
+    }
+    _svst1_f32(data, simd_cast(pg), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")]
+        fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
+    }
+    _svst1_f64(data, simd_cast(pg), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")]
+        fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8);
+    }
+    _svst1_s8(data, pg, base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")]
+        fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16);
+    }
+    _svst1_s16(data, simd_cast(pg), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")]
+        fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32);
+    }
+    _svst1_s32(data, simd_cast(pg), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")]
+        fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64);
+    }
+    _svst1_s64(data, simd_cast(pg), base)
+}
+#[doc = "Non-truncating store"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
-    svmul_s32_m(pg, op1, op2)
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) {
+    svst1_s8(pg, base.as_signed(), data.as_signed())
 }
-#[doc = "Multiply"]
+#[doc = "Non-truncating store"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
-    svmul_s32_x(pg, op1, svdup_n_s32(op2))
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) {
+    svst1_s16(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) {
+    svst1_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) {
+    svst1_s64(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")]
+        fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svsub_f32_m(simd_cast(pg), op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsub_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svsub_f32_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsub_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsub_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")]
+        fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svsub_f64_m(simd_cast(pg), op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsub_s8_m(pg, op1, op2) } +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) +} +#[doc = "Subtract"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsub_s16_m(simd_cast(pg), op1, op2) } } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svmul_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) } -#[doc = "Divide"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] - fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svdiv_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Divide"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svdiv_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Divide"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svdiv_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Divide"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svdiv_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(simd_cast(pg), op1, op2) } } -#[doc = "Divide"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: 
svint32_t) -> svint32_t { - svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Divide"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")] - fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; - } - unsafe { _svmul_s64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svmul_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svmul_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svmul_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsub_s64_m(simd_cast(pg), op1, op2) } } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svmul_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> 
svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svmul_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sub))] 
+pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svmul_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - 
svmul_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svmul_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, 
assert_instr(mul))] -pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svmul_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] #[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Multiply"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] - fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svorr_s8_m(pg, op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = 
"Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svorr_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] + fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabd_f32_m(simd_cast(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_m(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn 
svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] - fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svorr_s16_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_x(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svorr_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_z(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] + fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabd_f64_m(simd_cast(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(fabd))] +pub 
fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_m(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] - fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svorr_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_x(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svorr_s32_m(pg, op1, op2) +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_z(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] + fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_s8_m(pg, op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] - fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; - } - unsafe { _svorr_s64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] #[inline] 
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svorr_s64_m(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
-    svorr_s64_m(pg, op1, op2)
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svabd_s8_z(pg, op1, svdup_n_s8(op2))
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svorr_s64_x(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")]
+        fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svabd_s16_m(simd_cast(pg), op1, op2) }
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
-    svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svabd_s16_m(pg, op1, svdup_n_s16(op2))
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svorr_s64_z(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svabd_s16_m(pg, op1, op2)
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
-    unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svabd_s16_x(pg, op1, svdup_n_s16(op2))
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
-    svorr_u8_m(pg, op1, svdup_n_u8(op2))
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
-    svorr_u8_m(pg, op1, op2)
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svabd_s16_z(pg, op1, svdup_n_s16(op2))
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
-    svorr_u8_x(pg, op1, svdup_n_u8(op2))
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")]
+        fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svabd_s32_m(simd_cast(pg), op1, op2) }
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Absolute difference"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature =
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svorr_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svorr_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] + fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_s64_m(simd_cast(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_m(pg, op1, 
svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svorr_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] + fn _svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t; + } + unsafe { _svabd_u8_m(pg, op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svorr_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] + fn _svabd_u16_m(pg: svbool8_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t; + } + unsafe { _svabd_u16_m(simd_cast(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svorr_u64_m(pg, svsel_u64(pg, op1, 
svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, op1, op2) } -#[doc = "Set predicate elements to true"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b8() -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] - fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; - } - unsafe { _svptrue_pat_b8(PATTERN) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Set predicate elements to true"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b16() -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] - fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; - } - unsafe { simd_cast(_svptrue_pat_b16(PATTERN)) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Set predicate elements to true"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b32() -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] - fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; - } - unsafe { simd_cast(_svptrue_pat_b32(PATTERN)) } 
+#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Set predicate elements to true"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b64() -> svbool_t { +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] - fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] + fn _svabd_u32_m(pg: svbool4_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t; } - unsafe { simd_cast(_svptrue_pat_b64(PATTERN)) } + unsafe { _svabd_u32_m(simd_cast(pg), op1, op2) } } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, op1, op2) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_x(pg, op1, 
svdup_n_u32(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] + fn _svabd_u64_m(pg: svbool2_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t; + } + unsafe { _svabd_u64_m(simd_cast(pg), op1, op2) } } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, op1, op2) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Conditionally select elements"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] + +#[doc = "Absolute value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature 
= "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_m(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] - fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] + fn _svabs_f32_m(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; } - _svst1_f32(data, simd_cast(pg), base) + unsafe { _svabs_f32_m(simd_cast(pg), op) } } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] - fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); - } - _svst1_f64(data, simd_cast(pg), base) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(pg, op) } -#[doc = "Non-truncating store"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] -#[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] - fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); - } - _svst1_s8(data, pg, base) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(pg, svsel_f32(pg, op, svdup_n_f32(0.0))) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_m(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] - fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] + fn _svabs_f64_m(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; } - _svst1_s16(data, simd_cast(pg), base) + unsafe { _svabs_f64_m(simd_cast(pg), op) } } -#[doc = "Non-truncating store"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] -#[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] - fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); - } - _svst1_s32(data, simd_cast(pg), base) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(pg, op) } -#[doc = "Non-truncating store"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(pg, svsel_f64(pg, op, svdup_n_f64(0.0))) +} +#[doc = "Absolute value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] - fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] + fn _svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t; } - _svst1_s64(data, simd_cast(pg), base) + unsafe { _svabs_s8_m(pg, op) } } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { - svst1_s8(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(pg, op) } -#[doc = "Non-truncating store"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] -#[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { - svst1_s16(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(pg, svsel_s8(pg, op, svdup_n_s8(0))) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { - svst1_s32(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_m(pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] + fn _svabs_s16_m(pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svabs_s16_m(simd_cast(pg), op) } } -#[doc = "Non-truncating store"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(pg, op) +} +#[doc = "Absolute value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { - svst1_s64(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(pg, svsel_s16(pg, op, svdup_n_s16(0))) } -#[doc = "Subtract"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_m(pg: svbool_t, op: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] - fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] + fn _svabs_s32_m(pg: svbool4_t, op: svint32_t) -> svint32_t; } - unsafe { _svsub_f32_m(simd_cast(pg), op1, op2) } + unsafe { _svabs_s32_m(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(pg, op) } -#[doc = "Subtract"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_x(pg: svbool_t, 
op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsub_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(pg, svsel_s32(pg, op, svdup_n_s32(0))) } -#[doc = "Subtract"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_m(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] + fn _svabs_s64_m(pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svabs_s64_m(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(pg, op) } -#[doc = "Subtract"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(pg, svsel_s64(pg, op, svdup_n_s64(0))) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] - fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] + fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> 
svint8_t; } - unsafe { _svsub_f64_m(simd_cast(pg), op1, op2) } + unsafe { _svcnot_s8_m(inactive, pg, op) } } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(op, pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsub_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(svdup_n_s8(0), pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] + fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnot_s16_m(inactive, simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(op, pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(svdup_n_s16(0), pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] - fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] + fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; } - unsafe { _svsub_s8_m(pg, op1, op2) } + unsafe { _svcnot_s32_m(inactive, simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(op, pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svsub_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(svdup_n_s32(0), pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { 
+ #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] + fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnot_s64_m(inactive, simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(op, pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(svdup_n_s64(0), pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] - fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svsub_s16_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Conditional bitwise NOT"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(op, pg, op) +} +#[doc = "Conditional bitwise NOT"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(svdup_n_u8(0), pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svsub_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(op, pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(svdup_n_u16(0), pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) 
-> svuint32_t { + svcnot_u32_m(op, pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] - fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svsub_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(svdup_n_u32(0), pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsub_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(op, pg, op) } -#[doc = "Subtract"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(svdup_n_u64(0), pg, op) } -#[doc = "Subtract"] +// ============================================================================ +// Batch 3: Reduction/Horizontal Operations +// ============================================================================ +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv16i8")] + fn _svaddv_s8(pg: svbool8_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_s8(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv8i16")] + fn _svaddv_s16(pg: svbool4_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_s16(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] - fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv4i32")] + fn _svaddv_s32(pg: svbool2_t, op: svint32_t) -> i64; } - unsafe { _svsub_s64_m(simd_cast(pg), op1, op2) } + unsafe { _svaddv_s32(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv2i64")] + fn _svaddv_s64(pg: svbool_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_s64(pg, op) } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsub_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { + unsafe { svaddv_s8(pg, op.as_signed()) as u64 } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { + unsafe { svaddv_s16(pg, op.as_signed()) as u64 } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { + unsafe { svaddv_s32(pg, op.as_signed()) as u64 } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svaddv_s64(pg, op.as_signed()) as u64 } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.faddv.nxv4f32")] + fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svaddv_f32(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] + fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svaddv_f64(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)]"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsub_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cntb))] +pub fn svcntb() -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] + fn _svcntb() -> i32; + } + unsafe { _svcntb() } } -#[doc = "Subtract"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)]"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cnth))] +pub fn svcnth() -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] + fn _svcnth() -> i32; + } + unsafe { _svcnth() } } -#[doc = "Subtract"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)]"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(cntd))] +pub fn svcntd() -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")] + fn _svcntd() -> i32; + } + unsafe { _svcntd() } } -#[doc = "Subtract"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] + fn _svcntp_b8(pg: svbool8_t, op: svbool8_t) -> u64; + } + unsafe { _svcntp_b8(simd_cast(pg), simd_cast(op)) } } -#[doc = "Subtract"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] + fn _svcntp_b16(pg: svbool4_t, op: svbool4_t) -> u64; + } + unsafe { _svcntp_b16(simd_cast(pg), simd_cast(op)) } } -#[doc = "Subtract"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] + fn _svcntp_b32(pg: svbool2_t, op: svbool2_t) -> u64; + } + unsafe { _svcntp_b32(simd_cast(pg), simd_cast(op)) } } -#[doc = "Subtract"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsub_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] + fn _svcntp_b64(pg: svbool_t, op: svbool_t) -> u64; + } + unsafe { _svcntp_b64(pg, op) } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8(pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] + fn _svclz_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; + } + unsafe { _svclz_s8(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16(pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] + fn _svclz_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + } + unsafe { _svclz_s16(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] + fn _svclz_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + } + unsafe { _svclz_s32(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] + fn _svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t; + } + unsafe { _svclz_s64(pg, op) } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svclz_s8(pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsub_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svclz_s16(pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svclz_s32(pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svclz_s64(pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8(pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")] + fn _svcls_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; + } + unsafe { 
_svcls_s8(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16(pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")] + fn _svcls_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcls_s16(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")] + fn _svcls_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcls_s32(simd_cast(pg), op) } } -#[doc = "Subtract"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsub_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")] + fn _svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcls_s64(pg, op) } } -#[doc = "Subtract"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcls_s8(pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcls_s16(pg, op.as_signed()).as_unsigned() } } -#[doc = "Subtract"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcls_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcls_s64(pg, op.as_signed()).as_unsigned() } } From 191a1f0b86a489a016b90a723d8ca97a8e0aac9e Mon Sep 17 00:00:00 2001 From: wxh Date: Mon, 17 Nov 2025 17:12:31 +0800 Subject: [PATCH 16/27] Fill in the fifth batch of intrinsics; cumulative coverage so far: (`svadd/svsub/svmul`, `svabd`, `svabs`, `svcnot`) (`svcmp*`, `svc*`) (`svaddv`, `svcnt*`, `svclz/cls`) (`svadr*`, `svld*`, `svst*`) (`svbic*`, `svbrk*`, `svcmla`) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/sve.rs | 7422 ++++++++++------- 1 file changed, 4374 insertions(+), 3048 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs index 7f8075451f940..91e1adc682aa3 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -1308,5811 +1308,7137 @@ pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { svand_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f32_m( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, -) -> svfloat32_t { - static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")] - fn _svcadd_f32_m( - pg: svbool4_t, - op1: svfloat32_t, - op2: svfloat32_t, - imm_rotation: i32, - ) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.z.nvx16i1")] + fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; } - unsafe { _svcadd_f32_m(simd_cast(pg), op1, op2, IMM_ROTATION) } + unsafe { _svbic_b_z(pg, op1, op2) } } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f32_x( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, -) -> svfloat32_t { - svcadd_f32_m::(pg, op1, op2) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")] + fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbic_s8_m(pg, op1, op2) } } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f32_z( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, -) -> svfloat32_t { - svcadd_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f64_m( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, -) -> svfloat64_t { - static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")] - fn _svcadd_f64_m( - pg: svbool2_t, - op1: svfloat64_t, - op2: svfloat64_t, - imm_rotation: i32, - ) -> svfloat64_t; - } - unsafe { _svcadd_f64_m(simd_cast(pg), op1, op2, IMM_ROTATION) } +#[cfg_attr(test, assert_instr(bic))] 
+pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, op1, op2) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f64_x( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, -) -> svfloat64_t { - svcadd_f64_m::(pg, op1, op2) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f64_z( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, -) -> svfloat64_t { - svcadd_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f32_m( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - static_assert!( - IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 - ); - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")] - fn _svcmla_f32_m( - pg: svbool4_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, - imm_rotation: i32, - ) -> svfloat32_t; - } - unsafe { _svcmla_f32_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f32_x( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - svcmla_f32_m::(pg, op1, op2, op3) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_m(pg: 
svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")] + fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbic_s16_m(simd_cast(pg), op1, op2) } } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f32_z( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - svcmla_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f64_m( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, -) -> svfloat64_t { - static_assert!( - IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 - ); - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")] - fn _svcmla_f64_m( - pg: svbool2_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, - imm_rotation: i32, - ) -> svfloat64_t; - } - unsafe { _svcmla_f64_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, op1, op2) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f64_x( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, -) -> svfloat64_t { - svcmla_f64_m::(pg, op1, op2, op3) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, 
assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f64_z( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, -) -> svfloat64_t { - svcmla_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))] -pub fn svcmla_lane_f32( - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - static_assert_range!(IMM_INDEX, 0, 1); - static_assert!( - IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 - ); - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32" - )] - fn _svcmla_lane_f32( - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, - imm_index: i32, - imm_rotation: i32, - ) -> svfloat32_t; - } - unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")] - fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")] + fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { simd_cast(_svcmpeq_f32(simd_cast(pg), op1, op2)) } + unsafe { _svbic_s32_m(simd_cast(pg), op1, op2) } } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmpeq_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise 
clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")] - fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpeq_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, op1, op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmpeq_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] - fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; - } - unsafe { _svcmpeq_s8(pg, op1, op2) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] - fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")] + fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { simd_cast(_svcmpeq_s16(simd_cast(pg), op1, op2)) } + unsafe { _svbic_s64_m(simd_cast(pg), op1, op2) } } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] - fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpeq_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, op1, op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] - fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpeq_s64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmpeq_s16(pg, op1.as_signed(), 
op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, op1, op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] - fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpgt_f32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, op1, op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] - fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpgt_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - 
svcmpgt_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] - fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; - } - unsafe { _svcmpgt_s8(pg, op1, op2) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] - fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; - } - unsafe { simd_cast(_svcmpgt_s16(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, op1, op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = 
"Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] - fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpgt_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] - fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpgt_s64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpgt_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, op1, op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmpgt_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than"] +#[doc = "Break after first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpgt_s32(pg, op1.as_signed(), op2.as_signed()) } -} -#[doc = "Compare greater than"] +#[doc = "Break after first true condition"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")] + fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_m(inactive, pg, op) } } -#[doc = "Compare greater than"] +#[doc = "Break after first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpgt_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")] + fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_z(pg, op) } } -#[doc = "Compare greater than"] +#[doc = "Break before first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")] + fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrkb_b_m(inactive, pg, op) } } -#[doc = "Compare less than"] +#[doc = "Break before first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - svcmpgt_f32(pg, op2, op1) +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")] + fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { 
_svbrkb_b_z(pg, op) }
 }
-#[doc = "Compare less than"]
+#[doc = "Propagate break to next partition"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(fcmgt))]
-pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
-    svcmplt_f32(pg, op1, svdup_n_f32(op2))
+#[cfg_attr(test, assert_instr(brkn))]
+pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")]
+        fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkn_b_z(pg, op1, op2) }
 }
-#[doc = "Compare less than"]
+#[doc = "Break after first true condition, propagating from previous partition"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(fcmgt))]
-pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
-    svcmpgt_f64(pg, op2, op1)
+#[cfg_attr(test, assert_instr(brkpa))]
+pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1"
+        )]
+        fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkpa_b_z(pg, op1, op2) }
 }
-#[doc = "Compare less than"]
+#[doc = "Break before first true condition, propagating from previous partition"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(fcmgt))]
-pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
-    svcmplt_f64(pg, op1, svdup_n_f64(op2))
+#[cfg_attr(test, assert_instr(brkpb))]
+pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1"
+        )]
+        fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkpb_b_z(pg, op1, op2) }
 }
-
-#[doc = "Compare not equal to"]
+#[doc = "Complex add with rotate"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(fcmne))]
-pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
     unsafe extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")]
-        fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")]
+        fn _svcadd_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
     }
-    unsafe { simd_cast(_svcmpne_f32(simd_cast(pg), op1, op2)) }
+    unsafe { _svcadd_f32_m(simd_cast(pg), op1, op2, IMM_ROTATION) }
 }
-#[doc = "Compare not equal to"]
+#[doc = "Complex add with rotate"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(fcmne))]
-pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
-    svcmpne_f32(pg, op1, svdup_n_f32(op2))
-}
-#[doc = "Compare not equal to"]
-#[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    svcadd_f32_m::<IMM_ROTATION>(pg, op1, op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    svcadd_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")]
+        fn _svcadd_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            imm_rotation: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svcadd_f64_m(simd_cast(pg), op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    svcadd_f64_m::<IMM_ROTATION>(pg, op1, op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    svcadd_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")]
+        fn _svcmla_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcmla_f32_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svcmla_f32_m::<IMM_ROTATION>(pg, op1, op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svcmla_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")]
+        fn _svcmla_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            imm_rotation: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svcmla_f64_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svcmla_f64_m::<IMM_ROTATION>(pg, op1, op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svcmla_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcmla_lane_f32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32"
+        )]
+        fn _svcmla_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")]
+        fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { simd_cast(_svcmpeq_f32(simd_cast(pg), op1, op2)) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmpeq_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")]
+        fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { simd_cast(_svcmpeq_f64(simd_cast(pg), op1, op2)) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpeq_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] + fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpeq_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] + fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { simd_cast(_svcmpeq_s16(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] + fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpeq_s32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] + fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpeq_s64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] + fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpgt_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] + fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpgt_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] + fn _svcmpgt_s8(pg: svbool_t, op1: 
svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] + fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { simd_cast(_svcmpgt_s16(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] + fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpgt_s32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] + fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpgt_s64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpgt_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpgt_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpgt_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpgt_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpgt_f32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmplt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpgt_f64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmplt_f64(pg, op1, svdup_n_f64(op2)) +} + +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")] + fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpne_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpne_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] + fn _svcmpne_f64(pg: 
svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpne_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpne_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] + fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpne_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpne_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] + fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { simd_cast(_svcmpne_s16(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpne_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] + fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpne_s32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpne_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] + fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpne_s64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpne_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpne_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpne_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare 
not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpne_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpne_u64(pg, op1, svdup_n_u64(op2)) +} + +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")] + fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpge_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")] + fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmpge_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = 
"Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmne))] -pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] - fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] + fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; } - unsafe { simd_cast(_svcmpne_f64(simd_cast(pg), op1, op2)) } + unsafe { _svcmpge_s8(pg, op1, op2) } } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmne))] -pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmpne_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpge_s8(pg, op1, svdup_n_s8(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] - fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] + fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; } - unsafe { _svcmpne_s8(pg, op1, op2) } + unsafe { simd_cast(_svcmpge_s16(simd_cast(pg), op1, op2)) } } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpne_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpge_s16(pg, op1, 
svdup_n_s16(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] - fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] + fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; } - unsafe { simd_cast(_svcmpne_s16(simd_cast(pg), op1, op2)) } + unsafe { simd_cast(_svcmpge_s32(simd_cast(pg), op1, op2)) } } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpne_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpge_s32(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] - fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")] + fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; } - unsafe { simd_cast(_svcmpne_s32(simd_cast(pg), op1, op2)) } + unsafe { simd_cast(_svcmpge_s64(simd_cast(pg), op1, op2)) } } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn 
svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpne_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpge_s64(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpge_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpge_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpge_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpge_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpge_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpge_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpge_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmhs))] +pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpge_u64(pg, op1, svdup_n_u64(op2)) +} + +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv4f32")] + fn _svcmple_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmple_f32(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmple_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv2f64")] + fn _svcmple_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmple_f64(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fcmle))] +pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmple_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv16i8")] + fn _svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmple_s8(pg, op1, op2) } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, 
assert_instr(cmple))] +pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmple_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] - fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv8i16")] + fn _svcmple_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; } - unsafe { simd_cast(_svcmpne_s64(simd_cast(pg), op1, op2)) } -} -#[doc = "Compare not equal to"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpne_s64(pg, op1, svdup_n_s64(op2)) + unsafe { simd_cast(_svcmple_s16(simd_cast(pg), op1, op2)) } } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmple_s16(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpne_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv4i32")] + fn _svcmple_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmple_s32(simd_cast(pg), op1, op2)) } } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmple_s32(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmpne_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv2i64")] + fn _svcmple_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { simd_cast(_svcmple_s64(simd_cast(pg), op1, op2)) } } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmple_s64(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpne_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmple_s8(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmple_u8(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpne_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmple_s16(pg, op1.as_signed(), op2.as_signed()) } } - -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")] - fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpge_f32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmple_u16(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmpge_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmple_s32(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.fcmpge.nxv2f64")] - fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpge_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmple_u32(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmpge_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmple_s64(pg, op1.as_signed(), op2.as_signed()) } } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] - fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; - } - unsafe { _svcmpge_s8(pg, op1, op2) } +#[cfg_attr(test, assert_instr(cmls))] +pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmple_u64(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpge_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpgt_s8(pg, op2, op1) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] - fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; - } - unsafe { 
simd_cast(_svcmpge_s16(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpge_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] - fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpge_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpge_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")] - fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpge_s64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + 
svcmplt_s32(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpge_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpge_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpge_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmpge_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, 
op2: u16) -> svbool_t { - svcmpge_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpge_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpge_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpge_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpge_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) } - -#[doc = "Compare less than or equal to"] +#[doc = "Compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv4f32")] - fn _svcmple_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmple_f32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmple_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, simd_cast(pg), op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv2f64")] - fn _svcmple_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmple_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmple_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point 
convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv16i8")] - fn _svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; } - unsafe { _svcmple_s8(pg, op1, op2) } + unsafe { _svcvt_f32_s64_m(inactive, simd_cast(pg), op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmple_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv8i16")] - fn _svcmple_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; - } - unsafe { simd_cast(_svcmple_s16(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmple_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, simd_cast(pg), op.as_signed()) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv4i32")] - fn _svcmple_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmple_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmple_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv2i64")] - fn _svcmple_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; } - unsafe { simd_cast(_svcmple_s64(simd_cast(pg), op1, op2)) } + unsafe { _svcvt_f32_u64_m(inactive, simd_cast(pg), op.as_signed()) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmple_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmple_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmple_u8(pg, op1, svdup_n_u8(op2)) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, simd_cast(pg), op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmple_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmple_u16(pg, 
op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmple_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, simd_cast(pg), op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmple_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmple_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) } -#[doc = "Compare less than or equal to"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmple_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, simd_cast(pg), op.as_signed()) } } -#[doc = "Compare less than"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - svcmpgt_s8(pg, op2, op1) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmplt_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) } -#[doc = "Compare less than"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - svcmpgt_s16(pg, op2, op1) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, simd_cast(pg), op.as_signed()) } } -#[doc = "Compare less than"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmplt_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } } -#[doc = "Compare less than"] +#[doc = "Floating-point convert"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - svcmpgt_s32(pg, op2, op1) +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmplt_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn _svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - svcmpgt_s64(pg, op2, op1) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmplt_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue 
= "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - svcmpgt_u8(pg, op2, op1) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmplt_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - svcmpgt_u16(pg, op2, op1) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmplt_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - svcmpgt_u32(pg, op2, op1) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } } -#[doc = "Compare less than"] +#[doc = 
"Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmplt_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } } -#[doc = "Compare less than"] +#[doc = "Broadcast a scalar value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - svcmpgt_u64(pg, op2, op1) +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } } -#[doc = "Compare less than"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmplt_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(simd_cast(pg), base) } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t 
{ unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" - )] - fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; } - unsafe { _svcvt_f32_s32_m(inactive, simd_cast(pg), op) } + _svld1_f64(simd_cast(pg), base) } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { - unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1_s8(pg, base) } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { - svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1_s16(simd_cast(pg), base) } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] - fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; } - unsafe { _svcvt_f32_s64_m(inactive, simd_cast(pg), op) } + _svld1_s32(simd_cast(pg), base) } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { - unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(simd_cast(pg), base) } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { - svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" - )] - fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; - } - unsafe { _svcvt_f32_u32_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { - unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() } -#[doc = "Floating-point convert"] +#[doc = "Unextended load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { - svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.ucvtf.f32i64")] - fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] + fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { _svcvt_f32_u64_m(inactive, simd_cast(pg), op.as_signed()) } + unsafe { _svmul_f32_m(simd_cast(pg), op1, op2) } } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { - unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_m(pg, op1, svdup_n_f32(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { - svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, op1, op2) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" - )] - fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_s32_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_x(pg, op1, svdup_n_f32(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { - unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_z(pg: svbool_t, op1: 
svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { - svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_z(pg, op1, svdup_n_f32(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" - )] - fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] + fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { _svcvt_f64_s64_m(inactive, simd_cast(pg), op) } + unsafe { _svmul_f64_m(simd_cast(pg), op1, op2) } } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { - unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_m(pg, op1, svdup_n_f64(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { - svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" - )] - fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_u32_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { - unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { - svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" - )] - fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svcvt_f64_u64_m(inactive, simd_cast(pg), op.as_signed()) } + unsafe { _svmul_s8_m(pg, 
op1, op2) } } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { - unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { - svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_f32(op: f32) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] - fn _svdup_n_f32(op: f32) -> svfloat32_t; - } - unsafe { _svdup_n_f32(op) } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_f64(op: f64) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] - fn _svdup_n_f64(op: f64) -> svfloat64_t; - } - unsafe { _svdup_n_f64(op) } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s8(op: i8) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] - fn _svdup_n_s8(op: i8) -> svint8_t; - } - unsafe { _svdup_n_s8(op) } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s16(op: i16) -> svint16_t { +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] - fn _svdup_n_s16(op: i16) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svdup_n_s16(op) } + unsafe { _svmul_s16_m(simd_cast(pg), op1, op2) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s32(op: i32) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] - fn _svdup_n_s32(op: i32) -> svint32_t; - } - unsafe { _svdup_n_s32(op) } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s64(op: i64) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] - fn _svdup_n_s64(op: i64) -> svint64_t; - } - unsafe { _svdup_n_s64(op) } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u8(op: u8) -> svuint8_t 
{ - unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u16(op: u16) -> svuint16_t { - unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u32(op: u32) -> svuint32_t { - unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u64(op: u64) -> svuint64_t { - unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(simd_cast(pg), op1, op2) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] - fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; - } - 
_svld1_f32(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] - fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; - } - _svld1_f64(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, op1, op2) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] - fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; - } - _svld1_s8(pg, base) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] - fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; - } - _svld1_s16(simd_cast(pg), base) 
+#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = "Multiply"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] - fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; - } - _svld1_s32(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = "Divide"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] - fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] + fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; } - _svld1_s64(simd_cast(pg), base) + unsafe { _svdiv_s32_m(simd_cast(pg), op1, op2) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = "Divide"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { - svld1_s8(pg, 
base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = "Divide"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { - svld1_s16(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, op1, op2) } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = "Divide"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { - svld1_s32(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Unextended load"] +#[doc = "Divide"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { - svld1_s64(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, 
assert_instr(sdiv))] +pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_z(pg, op1, svdup_n_s32(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] - fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")] + fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svmul_f32_m(simd_cast(pg), op1, op2) } + unsafe { _svmul_s64_m(simd_cast(pg), op1, op2) } } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_m(pg, op1, svdup_n_s64(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svmul_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, op1, op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_x(pg, op1, svdup_n_s64(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] #[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] - fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svmul_f64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svmul_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_z(pg, op1, svdup_n_f64(op2)) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] - fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svmul_s8_m(pg, op1, op2) } +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_m(pg, op1, svdup_n_s8(op2)) +pub fn svmul_n_u16_m(pg: svbool_t, 
op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svmul_s8_m(pg, op1, op2) +pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_x(pg, op1, svdup_n_s8(op2)) +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_z(pg, op1, svdup_n_s8(op2)) +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] - fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svmul_s16_m(simd_cast(pg), op1, op2) } +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), 
op2.as_signed()).as_unsigned() } } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_m(pg, op1, svdup_n_s16(op2)) +pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svmul_s16_m(pg, op1, op2) +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_x(pg, op1, svdup_n_s16(op2)) +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_z(pg, op1, svdup_n_s16(op2)) +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] - fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svmul_s32_m(simd_cast(pg), op1, op2) } +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svmul_s32_m(pg, op1, svdup_n_s32(op2)) +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svmul_s32_m(pg, op1, op2) +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svmul_s32_x(pg, op1, svdup_n_s32(op2)) +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, svdup_n_u64(op2)) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } #[doc = "Multiply"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svmul_s32_z(pg, op1, svdup_n_s32(op2)) +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Divide"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] - fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svdiv_s32_m(simd_cast(pg), op1, op2) } + unsafe { _svorr_s8_m(pg, op1, op2) } } -#[doc = "Divide"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svdiv_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Divide"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svdiv_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) } -#[doc = "Divide"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svdiv_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, 
assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Divide"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Divide"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sdiv))] -pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")] - fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svmul_s64_m(simd_cast(pg), op1, op2) } + unsafe { _svorr_s16_m(simd_cast(pg), op1, op2) } } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svmul_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svmul_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svmul_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svmul_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(simd_cast(pg), op1, op2) } } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svmul_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, 
svdup_n_s32(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(simd_cast(pg), op1, op2) } } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svmul_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svmul_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Multiply"] +#[doc = 
"Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svmul_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Multiply"] +#[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, 
op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] - fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svorr_s8_m(pg, op1, op2) } +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_m(pg, op1, svdup_n_s8(op2)) +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svorr_s8_m(pg, op1, op2) +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_x(pg, op1, svdup_n_s8(op2)) +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> 
svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_z(pg, op1, svdup_n_s8(op2)) +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] - fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svorr_s16_m(simd_cast(pg), op1, op2) } +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_m(pg, op1, svdup_n_s16(op2)) +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, op1, op2) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svorr_s16_m(pg, op1, op2) +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_x(pg, op1, svdup_n_s16(op2)) +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + 
svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } #[doc = "Bitwise inclusive OR"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Set predicate elements to true"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_z(pg, op1, svdup_n_s16(op2)) +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Set predicate elements to true"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] - fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; } - unsafe { _svorr_s32_m(simd_cast(pg), op1, op2) } + unsafe { simd_cast(_svptrue_pat_b16(PATTERN)) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Set predicate elements to true"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_m(pg, op1, svdup_n_s32(op2)) +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { simd_cast(_svptrue_pat_b32(PATTERN)) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Set predicate elements to true"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svorr_s32_m(pg, op1, op2) +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b64() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] + fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + } + unsafe { simd_cast(_svptrue_pat_b64(PATTERN)) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Conditionally select elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Conditionally select elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Conditionally select elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Conditionally select elements"] #[doc = ""] -#[doc = "[Arm's 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
-    unsafe extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")]
-        fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
-    }
-    unsafe { _svorr_s64_m(simd_cast(pg), op1, op2) }
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) }
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Conditionally select elements"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svorr_s64_m(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) }
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Conditionally select elements"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
-    svorr_s64_m(pg, op1, op2)
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) }
 }
-#[doc = "Bitwise inclusive OR"]
+#[doc = "Conditionally select elements"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"]
 #[inline]
 #[target_feature(enable = "sve")]
 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(orr))]
-pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svorr_s64_x(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) }
 }
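+// NOTE: `simd_select` requires the mask to have the same number of lanes as the data, so
+// the all-lanes `svbool_t` governing predicate is first cast to the lane-matched predicate
+// type: `svbool8_t` for 16-bit, `svbool4_t` for 32-bit and `svbool2_t` for 64-bit elements.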
issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Conditionally select elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svorr_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Conditionally select elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Conditionally select elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svorr_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> 
svuint8_t { - svorr_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] + fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst1_f32(data, simd_cast(pg), base) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svorr_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] + fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst1_f64(data, simd_cast(pg), base) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst1_s8(data, pg, base) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub 
fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svorr_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst1_s16(data, simd_cast(pg), base) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst1_s32(data, simd_cast(pg), base) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst1_s64(data, simd_cast(pg), base) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svorr_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svst1_s8(pg, base.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svst1_s16(pg, base.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svst1_s32(pg, base.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Non-truncating store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svst1_s64(pg, base.as_signed(), 
data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] + fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsub_f32_m(simd_cast(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_m(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svorr_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, op1, op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_x(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_z(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] + fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsub_f64_m(simd_cast(pg), op1, op2) } } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svorr_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, 
op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_x(pg, op1, svdup_n_u64(op2)) +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) } -#[doc = "Set predicate elements to true"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b8() -> svbool_t { +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] - fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svptrue_pat_b8(PATTERN) } + unsafe { _svsub_s8_m(pg, op1, op2) } } -#[doc = "Set predicate elements to true"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b16() -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] - fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; - } - unsafe { simd_cast(_svptrue_pat_b16(PATTERN)) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Set predicate elements to true"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b32() -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] - fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; - } - unsafe { simd_cast(_svptrue_pat_b32(PATTERN)) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) } -#[doc = "Set predicate elements to true"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b64() -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] - fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; - } - unsafe { simd_cast(_svptrue_pat_b64(PATTERN)) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsub_s16_m(simd_cast(pg), op1, op2) } } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe { 
simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(simd_cast(pg), op1, op2) } } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Conditionally select elements"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] - fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); - } - _svst1_f32(data, simd_cast(pg), base) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] - fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); - } - _svst1_f64(data, simd_cast(pg), base) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] - fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); - } - _svst1_s8(data, pg, base) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] - fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; } - _svst1_s16(data, simd_cast(pg), base) + unsafe { _svsub_s64_m(simd_cast(pg), op1, op2) } } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] - fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); - } - _svst1_s32(data, simd_cast(pg), base) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] - fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); - } - _svst1_s64(data, simd_cast(pg), base) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { - svst1_s8(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Non-truncating store"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { - svst1_s16(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Non-truncating store"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, 
svdup_n_s64(op2)) +} +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { - svst1_s32(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Non-truncating store"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { - svst1_s64(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] - fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svsub_f32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, op1, svdup_n_u8(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] #[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsub_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] #[inline] #[target_feature(enable 
= "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] - fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svsub_f64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsub_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] - fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svsub_s8_m(pg, op1, op2) } +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_m(pg, op1, svdup_n_s8(op2)) +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svsub_s8_m(pg, op1, op2) +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_x(pg, op1, svdup_n_s8(op2)) +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_z(pg, op1, svdup_n_s8(op2)) +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] - fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svsub_s16_m(simd_cast(pg), op1, op2) } +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_m(pg, op1, svdup_n_s16(op2)) +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svsub_s16_m(pg, op1, op2) +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] + fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabd_f32_m(simd_cast(pg), op1, op2) } } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_m(pg, op1, svdup_n_f32(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, op1, op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] - fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svsub_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_x(pg, op1, svdup_n_f32(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] 
#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsub_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_z(pg, op1, svdup_n_f32(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] + fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabd_f64_m(simd_cast(pg), op1, op2) } } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_m(pg, op1, svdup_n_f64(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, 
op1, op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] - fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; - } - unsafe { _svsub_s64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_x(pg, op1, svdup_n_f64(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsub_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_z(pg, op1, svdup_n_f64(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] + fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_s8_m(pg, op1, op2) } } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, op1, op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsub_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] + fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_s16_m(simd_cast(pg), op1, op2) } } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, op1, op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) 
-> svuint16_t { - svsub_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsub_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] + fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_s32_m(simd_cast(pg), op1, op2) } } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, op1, op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsub_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] + fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_s64_m(simd_cast(pg), op1, op2) } } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) 
+#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, op1, op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsub_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn 
svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] + fn _svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t; + } + unsafe { _svabd_u8_m(pg, op1, op2) } } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Subtract"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, op1, op2) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] - fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svabd_f32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_x(pg, op1, svdup_n_u8(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svabd_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } #[doc = 
"Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svabd_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_z(pg, op1, svdup_n_u8(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svabd_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] + fn _svabd_u16_m(pg: svbool8_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t; + } + unsafe { _svabd_u16_m(simd_cast(pg), op1, op2) } } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_m(pg, op1, svdup_n_u16(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svabd_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, op1, op2) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern 
"C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] - fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svabd_f64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_x(pg, op1, svdup_n_u16(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svabd_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svabd_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_z(pg, op1, svdup_n_u16(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svabd_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] + fn _svabd_u32_m(pg: svbool4_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t; + } + unsafe { _svabd_u32_m(simd_cast(pg), op1, op2) } } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_m(pg, op1, svdup_n_u32(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svabd_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, op1, op2) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] - fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svabd_s8_m(pg, op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_x(pg, op1, svdup_n_u32(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svabd_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svabd_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_z(pg, op1, svdup_n_u32(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svabd_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: 
svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] + fn _svabd_u64_m(pg: svbool2_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t; + } + unsafe { _svabd_u64_m(simd_cast(pg), op1, op2) } } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_m(pg, op1, svdup_n_u64(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svabd_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, op1, op2) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] - fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svabd_s16_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_x(pg, op1, svdup_n_u64(op2)) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svabd_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } #[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svabd_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Absolute difference"] + +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svabd_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_m(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] + fn _svabs_f32_m(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabs_f32_m(simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(pg, op) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svabd_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(pg, svsel_f32(pg, op, svdup_n_f32(0.0))) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_m(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] - fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] + fn _svabs_f64_m(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; } - unsafe { _svabd_s32_m(simd_cast(pg), op1, op2) } + unsafe { _svabs_f64_m(simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svabd_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(pg, op) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svabd_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(pg, svsel_f64(pg, op, svdup_n_f64(0.0))) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svabd_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] + fn _svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svabs_s8_m(pg, op) } } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(pg, op) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svabd_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(pg, svsel_s8(pg, op, svdup_n_s8(0))) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_m(pg: svbool_t, op: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] - fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] + fn _svabs_s16_m(pg: svbool8_t, op: svint16_t) -> svint16_t; } - unsafe { _svabd_s64_m(simd_cast(pg), op1, op2) } + unsafe { _svabs_s16_m(simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svabd_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(pg, op) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svabd_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(pg, svsel_s16(pg, op, svdup_n_s16(0))) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] 
-pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svabd_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_m(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] + fn _svabs_s32_m(pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svabs_s32_m(simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(pg, op) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svabd_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(pg, svsel_s32(pg, op, svdup_n_s32(0))) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_m(pg: svbool_t, op: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] - fn _svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] + fn _svabs_s64_m(pg: svbool2_t, op: svint64_t) -> svint64_t; } - unsafe { _svabd_u8_m(pg, op1, op2) } + unsafe { _svabs_s64_m(simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svabd_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_x(pg: 
svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(pg, op) } -#[doc = "Absolute difference"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svabd_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(pg, svsel_s64(pg, op, svdup_n_s64(0))) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svabd_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] + fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnot_s8_m(inactive, pg, op) } } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(op, pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svabd_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(svdup_n_s8(0), pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] - fn _svabd_u16_m(pg: svbool8_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] + fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; } - unsafe { _svabd_u16_m(simd_cast(pg), op1, op2) } + unsafe { _svcnot_s16_m(inactive, simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svabd_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(op, pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svabd_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(svdup_n_s16(0), pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svabd_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] + fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnot_s32_m(inactive, simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, 
assert_instr(uabd))] -pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(op, pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svabd_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(svdup_n_s32(0), pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] - fn _svabd_u32_m(pg: svbool4_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] + fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; } - unsafe { _svabd_u32_m(simd_cast(pg), op1, op2) } -} -#[doc = "Absolute difference"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svabd_u32_m(pg, op1, svdup_n_u32(op2)) + unsafe { _svcnot_s64_m(inactive, simd_cast(pg), op) } } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svabd_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(op, pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svabd_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(svdup_n_s64(0), pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svabd_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(op, pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] - fn _svabd_u64_m(pg: svbool2_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t; - } - unsafe { _svabd_u64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(svdup_n_u8(0), pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svabd_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, 
assert_instr(cnot))] +pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svabd_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(op, pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svabd_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(svdup_n_u16(0), pg, op) } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } } -#[doc = "Absolute difference"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svabd_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(op, pg, op) } - -#[doc = "Absolute value"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f32_m(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] - fn _svabs_f32_m(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; - } - unsafe { _svabs_f32_m(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(svdup_n_u32(0), pg, op) } -#[doc = "Absolute value"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - svabs_f32_m(pg, op) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } } -#[doc = "Absolute value"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - svabs_f32_m(pg, svsel_f32(pg, op, svdup_n_f32(0.0))) +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(op, pg, op) } -#[doc = "Absolute value"] +#[doc = "Conditional bitwise NOT"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f64_m(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] - fn _svabs_f64_m(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; - } - unsafe { _svabs_f64_m(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(svdup_n_u64(0), pg, op) } -#[doc = "Absolute value"] +// ============================================================================ +// Batch 3: Reduction/Horizontal Operations +// ============================================================================ +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f64_x(pg: 
svbool_t, op: svfloat64_t) -> svfloat64_t { - svabs_f64_m(pg, op) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv16i8")] + fn _svaddv_s8(pg: svbool8_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_s8(simd_cast(pg), op) } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - svabs_f64_m(pg, svsel_f64(pg, op, svdup_n_f64(0.0))) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv8i16")] + fn _svaddv_s16(pg: svbool4_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_s16(simd_cast(pg), op) } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] - fn _svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv4i32")] + fn _svaddv_s32(pg: svbool2_t, op: svint32_t) -> i64; } - unsafe { _svabs_s8_m(pg, op) } + unsafe { _svaddv_s32(simd_cast(pg), op) } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { - svabs_s8_m(pg, op) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv2i64")] + fn _svaddv_s64(pg: svbool_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_s64(pg, op) } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { - 
svabs_s8_m(pg, svsel_s8(pg, op, svdup_n_s8(0))) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { + unsafe { svaddv_s8(pg, op.as_signed()) as u64 } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s16_m(pg: svbool_t, op: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] - fn _svabs_s16_m(pg: svbool8_t, op: svint16_t) -> svint16_t; - } - unsafe { _svabs_s16_m(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { + unsafe { svaddv_s16(pg, op.as_signed()) as u64 } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { - svabs_s16_m(pg, op) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { + unsafe { svaddv_s32(pg, op.as_signed()) as u64 } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { - svabs_s16_m(pg, svsel_s16(pg, op, svdup_n_s16(0))) +#[cfg_attr(test, assert_instr(addv))] +pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svaddv_s64(pg, op.as_signed()) as u64 } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s32_m(pg: svbool_t, op: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] - fn _svabs_s32_m(pg: svbool4_t, op: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] + fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; } - unsafe { _svabs_s32_m(simd_cast(pg), op) } + unsafe { _svaddv_f32(simd_cast(pg), op) } } -#[doc = "Absolute value"] +#[doc = "Add across vector"] #[doc = ""] -#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { - svabs_s32_m(pg, op) +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] + fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svaddv_f64(simd_cast(pg), op) } } -#[doc = "Absolute value"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)]"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { - svabs_s32_m(pg, svsel_s32(pg, op, svdup_n_s32(0))) +#[cfg_attr(test, assert_instr(cntb))] +pub fn svcntb() -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] + fn _svcntb() -> i32; + } + unsafe { _svcntb() } } -#[doc = "Absolute value"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)]"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s64_m(pg: svbool_t, op: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(cnth))] +pub fn svcnth() -> i32 { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] - fn _svabs_s64_m(pg: svbool2_t, op: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] + fn _svcnth() -> i32; } - unsafe { _svabs_s64_m(simd_cast(pg), op) } + unsafe { _svcnth() } } -#[doc = "Absolute value"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)]"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { - svabs_s64_m(pg, op) +#[cfg_attr(test, assert_instr(cntd))] +pub fn svcntd() -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")] + fn _svcntd() -> i32; + } + unsafe { _svcntd() } } -#[doc = "Absolute value"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b8])"] #[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { - svabs_s64_m(pg, svsel_s64(pg, op, svdup_n_s64(0))) +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] + fn _svcntp_b8(pg: svbool8_t, op: svbool8_t) -> u64; + } + unsafe { _svcntp_b8(simd_cast(pg), simd_cast(op)) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] - fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] + fn _svcntp_b16(pg: svbool4_t, op: svbool4_t) -> u64; } - unsafe { _svcnot_s8_m(inactive, pg, op) } + unsafe { _svcntp_b16(simd_cast(pg), simd_cast(op)) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { - svcnot_s8_m(op, pg, op) +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] + fn _svcntp_b32(pg: svbool2_t, op: svbool2_t) -> u64; + } + unsafe { _svcntp_b32(simd_cast(pg), simd_cast(op)) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count active predicate elements"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { - svcnot_s8_m(svdup_n_s8(0), pg, op) +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] + fn _svcntp_b64(pg: svbool_t, op: svbool_t) -> u64; + } + unsafe { _svcntp_b64(pg, op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8(pg: svbool_t, op: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] - fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] + fn _svclz_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; } - unsafe { _svcnot_s16_m(inactive, simd_cast(pg), op) } + unsafe { _svclz_s8(simd_cast(pg), op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { - svcnot_s16_m(op, pg, op) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16(pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] + fn _svclz_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + } + unsafe { _svclz_s16(simd_cast(pg), op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { - svcnot_s16_m(svdup_n_s16(0), pg, op) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] + fn _svclz_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + } + unsafe { _svclz_s32(simd_cast(pg), op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] - fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] + fn _svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t; } - unsafe { _svcnot_s32_m(inactive, simd_cast(pg), op) } + unsafe { _svclz_s64(pg, op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { - svcnot_s32_m(op, pg, op) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svclz_s8(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { - svcnot_s32_m(svdup_n_s32(0), pg, op) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svclz_s16(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] - fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; - } - unsafe { _svcnot_s64_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svclz_s32(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading zeros"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { - svcnot_s64_m(op, pg, op) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svclz_s64(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { - svcnot_s64_m(svdup_n_s64(0), pg, op) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8(pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")] + fn _svcls_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcls_s8(simd_cast(pg), op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { - unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16(pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")] + fn _svcls_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcls_s16(simd_cast(pg), op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { - svcnot_u8_m(op, pg, op) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")] + fn _svcls_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcls_s32(simd_cast(pg), op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { - svcnot_u8_m(svdup_n_u8(0), pg, op) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")] + fn _svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcls_s64(pg, op) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u8])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { - unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcls_s8(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u16])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { - svcnot_u16_m(op, pg, op) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcls_s16(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { - svcnot_u16_m(svdup_n_u16(0), pg, op) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcls_s32(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { - unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcls_s64(pg, op.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] + +// ============================================================================ +// Batch 4: Address Generation and Load/Store Operations +// ============================================================================ + +// ---------------------------------------------------------------------------- +// svadr - address generation functions +// ---------------------------------------------------------------------------- + +#[doc = "Address generation"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_s32])"] #[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { - svcnot_u32_m(op, pg, op) +#[cfg_attr(test, assert_instr(adr))] +pub unsafe fn svadr_s32(pg: svbool_t, base: *const i8, offset: svint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adr.nxv4i32")] + fn _svadr_s32(pg: svbool4_t, base: *const i8, offset: svint32_t) -> svuint64_t; + } + _svadr_s32(simd_cast(pg), base, offset) } -#[doc = "Conditional bitwise NOT"] + +#[doc = "Address generation"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_s64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { - svcnot_u32_m(svdup_n_u32(0), pg, op) +#[cfg_attr(test, assert_instr(adr))] +pub unsafe fn svadr_s64(pg: svbool_t, base: *const i8, offset: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adr.nxv2i64")] + fn _svadr_s64(pg: svbool2_t, base: *const i8, offset: svint64_t) -> svuint64_t; + } + _svadr_s64(simd_cast(pg), base, offset) } -#[doc = "Conditional bitwise NOT"] + +#[doc = "Address generation"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_u32])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { - unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(adr))] +pub unsafe fn svadr_u32(pg: svbool_t, base: *const i8, offset: svuint32_t) -> svuint64_t { + unsafe { svadr_s32(pg, base, offset.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] + +#[doc = "Address generation"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_u64])"] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { - svcnot_u64_m(op, pg, op) +#[cfg_attr(test, assert_instr(adr))] +pub unsafe fn svadr_u64(pg: svbool_t, base: *const i8, offset: svuint64_t) -> svuint64_t { + unsafe { svadr_s64(pg, base, offset.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] + +// ---------------------------------------------------------------------------- +// svld1_vnum - 带向量索引的加载 +// ---------------------------------------------------------------------------- + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { - svcnot_u64_m(svdup_n_u64(0), pg, op) +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_vnum_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + let offset_base = base.add(vnum as usize * 4); + _svld1_vnum_f32(simd_cast(pg), offset_base) } -// ============================================================================ -// Batch 3: Reduction/Horizontal Operations -// ============================================================================ -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv16i8")] - fn _svaddv_s8(pg: svbool8_t, op: svint8_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_vnum_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; } - unsafe { _svaddv_s8(simd_cast(pg), op) } + let offset_base = base.add(vnum as usize * 2); + _svld1_vnum_f64(simd_cast(pg), offset_base) } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { 
+#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv8i16")] - fn _svaddv_s16(pg: svbool4_t, op: svint16_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_vnum_s8(pg: svbool_t, base: *const i8) -> svint8_t; } - unsafe { _svaddv_s16(simd_cast(pg), op) } + let offset_base = base.add(vnum as usize * 16); + _svld1_vnum_s8(pg, offset_base) } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv4i32")] - fn _svaddv_s32(pg: svbool2_t, op: svint32_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_vnum_s16(pg: svbool8_t, base: *const i16) -> svint16_t; } - unsafe { _svaddv_s32(simd_cast(pg), op) } + let offset_base = base.add(vnum as usize * 8); + _svld1_vnum_s16(simd_cast(pg), offset_base) } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv2i64")] - fn _svaddv_s64(pg: svbool_t, op: svint64_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_vnum_s32(pg: svbool4_t, base: *const i32) -> svint32_t; } - unsafe { _svaddv_s64(pg, op) } + let offset_base = base.add(vnum as usize * 4); + _svld1_vnum_s32(simd_cast(pg), offset_base) } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { - unsafe { svaddv_s8(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_vnum_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + let offset_base = base.add(vnum as usize * 2); + _svld1_vnum_s64(simd_cast(pg), offset_base) } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { - unsafe { svaddv_s16(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svld1_vnum_s8(pg, base.as_signed(), vnum).as_unsigned() } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { - unsafe { svaddv_s32(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svld1_vnum_s16(pg, base.as_signed(), vnum).as_unsigned() } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { - unsafe { svaddv_s64(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svld1_vnum_s32(pg, base.as_signed(), vnum).as_unsigned() } -#[doc = "Add across vector"] + +#[doc = "Unextended load (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(faddv))] -pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] - fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; - } - unsafe { _svaddv_f32(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svld1_vnum_s64(pg, base.as_signed(), vnum).as_unsigned() } -#[doc = "Add across vector"] + +// ---------------------------------------------------------------------------- +// svld1_gather - gather loads +// ---------------------------------------------------------------------------- + +#[doc = "Gather load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s32index]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(faddv))] -pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] - fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> 
f64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv4f32")] + fn _svld1_gather_s32index_f32(pg: svbool4_t, base: *const f32, indices: svint32_t) -> svfloat32_t; } - unsafe { _svaddv_f64(simd_cast(pg), op) } + _svld1_gather_s32index_f32(simd_cast(pg), base, indices) } -#[doc = "Count active predicate elements"] + +#[doc = "Gather load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)]"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s64index]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntb))] -pub fn svcntb() -> i32 { +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] - fn _svcntb() -> i32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64")] + fn _svld1_gather_s64index_f64(pg: svbool2_t, base: *const f64, indices: svint64_t) -> svfloat64_t; } - unsafe { _svcntb() } + _svld1_gather_s64index_f64(simd_cast(pg), base, indices) } -#[doc = "Count active predicate elements"] + +#[doc = "Gather load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)]"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s32index]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnth))] -pub fn svcnth() -> i32 { +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] - fn _svcnth() -> i32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv4i32")] + fn _svld1_gather_s32index_s32(pg: svbool4_t, base: *const i32, indices: svint32_t) -> svint32_t; } - unsafe { _svcnth() } + _svld1_gather_s32index_s32(simd_cast(pg), base, indices) } -#[doc = "Count active predicate elements"] + +#[doc = "Gather load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)]"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s64index]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntd))] -pub fn svcntd() -> i32 { +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")] - fn _svcntd() -> i32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64")] + fn _svld1_gather_s64index_s64(pg: svbool2_t, base: *const i64, indices: svint64_t) -> svint64_t; } - unsafe { _svcntd() } + _svld1_gather_s64index_s64(simd_cast(pg), base, indices) } -#[doc = "Count active predicate elements"] + +#[doc = "Gather load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32index]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] - fn _svcntp_b8(pg: svbool8_t, op: svbool8_t) -> u64; +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + unsafe { + svld1_gather_s32index_s32(pg, base.as_signed(), indices.as_signed()).as_unsigned() } - unsafe { _svcntp_b8(simd_cast(pg), simd_cast(op)) } } -#[doc = "Count active predicate elements"] + +#[doc = "Gather load"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64index]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] - fn _svcntp_b16(pg: svbool4_t, op: svbool4_t) -> u64; +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + unsafe { + svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() } - unsafe { _svcntp_b16(simd_cast(pg), simd_cast(op)) } } -#[doc = "Count active predicate elements"] + +// 
---------------------------------------------------------------------------- +// svst1_vnum - stores with a vector-count offset +// ---------------------------------------------------------------------------- + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] - fn _svcntp_b32(pg: svbool2_t, op: svbool2_t) -> u64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] + fn _svst1_vnum_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); } - unsafe { _svcntp_b32(simd_cast(pg), simd_cast(op)) } + let offset_base = base.add(vnum as usize * 4); + _svst1_vnum_f32(data, simd_cast(pg), offset_base) } -#[doc = "Count active predicate elements"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] - fn _svcntp_b64(pg: svbool_t, op: svbool_t) -> u64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] + fn _svst1_vnum_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); } - unsafe { _svcntp_b64(pg, op) } + let offset_base = base.add(vnum as usize * 2); + _svst1_vnum_f64(data, simd_cast(pg), offset_base) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s8(pg: svbool_t, op: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] - fn _svclz_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_vnum_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); } - unsafe { _svclz_s8(simd_cast(pg), op) } + let offset_base = base.add(vnum as usize * 16); + _svst1_vnum_s8(data, pg, offset_base) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s16(pg: svbool_t, op: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] - fn _svclz_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_vnum_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); } - unsafe { _svclz_s16(simd_cast(pg), op) } + let offset_base = base.add(vnum as usize * 8); + _svst1_vnum_s16(data, simd_cast(pg), offset_base) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s32(pg: svbool_t, op: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] - fn _svclz_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name 
= "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_vnum_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); } - unsafe { _svclz_s32(simd_cast(pg), op) } + let offset_base = base.add(vnum as usize * 4); + _svst1_vnum_s32(data, simd_cast(pg), offset_base) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] - fn _svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_vnum_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); } - unsafe { _svclz_s64(pg, op) } + let offset_base = base.add(vnum as usize * 2); + _svst1_vnum_s64(data, simd_cast(pg), offset_base) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { - unsafe { svclz_s8(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svst1_vnum_s8(pg, base.as_signed(), vnum, data.as_signed()) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { - unsafe { svclz_s16(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svst1_vnum_s16(pg, base.as_signed(), vnum, data.as_signed()) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { - unsafe { svclz_s32(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svst1_vnum_s32(pg, base.as_signed(), vnum, data.as_signed()) } -#[doc = "Count leading zeros"] + +#[doc = "Unextended store (vector base + scalar offset)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { - unsafe { svclz_s64(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svst1_vnum_s64(pg, base.as_signed(), vnum, data.as_signed()) } -#[doc = "Count leading sign bits"] + +// ---------------------------------------------------------------------------- +// svst1_scatter - scatter stores +// ---------------------------------------------------------------------------- + +#[doc = "Scatter store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s32index]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s8(pg: 
svbool_t, op: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svint32_t, + data: svfloat32_t, +) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")] - fn _svcls_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv4f32")] + fn _svst1_scatter_s32index_f32(data: svfloat32_t, pg: svbool4_t, base: *mut f32, indices: svint32_t); } - unsafe { _svcls_s8(simd_cast(pg), op) } + _svst1_scatter_s32index_f32(data, simd_cast(pg), base, indices) } -#[doc = "Count leading sign bits"] + +#[doc = "Scatter store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s64index]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s16(pg: svbool_t, op: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")] - fn _svcls_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64")] + fn _svst1_scatter_s64index_f64(data: svfloat64_t, pg: svbool2_t, base: *mut f64, indices: svint64_t); } - unsafe { _svcls_s16(simd_cast(pg), op) } + _svst1_scatter_s64index_f64(data, simd_cast(pg), base, indices) } -#[doc = "Count leading sign bits"] + +#[doc = "Scatter store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s32index]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s32(pg: svbool_t, op: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svint32_t, + data: svint32_t, +) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")] - fn _svcls_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv4i32")] + fn _svst1_scatter_s32index_s32(data: svint32_t, pg: svbool4_t, base: *mut i32, indices: svint32_t); } - unsafe { _svcls_s32(simd_cast(pg), 
op) } + _svst1_scatter_s32index_s32(data, simd_cast(pg), base, indices) } -#[doc = "Count leading sign bits"] + +#[doc = "Scatter store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s64index]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")] - fn _svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64")] + fn _svst1_scatter_s64index_s64(data: svint64_t, pg: svbool2_t, base: *mut i64, indices: svint64_t); } - unsafe { _svcls_s64(pg, op) } + _svst1_scatter_s64index_s64(data, simd_cast(pg), base, indices) } -#[doc = "Count leading sign bits"] + +#[doc = "Scatter store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u8])"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { - unsafe { svcls_s8(pg, op.as_signed()).as_unsigned() } -} -#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32index]_u32)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { - unsafe { svcls_s16(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svuint32_t, + data: svuint32_t, +) { + unsafe { + svst1_scatter_s32index_s32(pg, base.as_signed(), indices.as_signed(), data.as_signed()) + } } -#[doc = "Count leading sign bits"] + +#[doc = "Scatter store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u32])"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { - unsafe { svcls_s32(pg, 
op.as_signed()).as_unsigned() } -} -#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64index]_u64)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { - unsafe { svcls_s64(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + unsafe { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) + } } From c0a0913113a646d31726aef13e22a31526de8a00 Mon Sep 17 00:00:00 2001 From: wxh Date: Tue, 18 Nov 2025 10:08:15 +0800 Subject: [PATCH 17/27] =?UTF-8?q?11.18=E6=9B=B4=E6=96=B0transmute=5Fcopy?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/mod.rs | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs index c61a5e21db826..afe6e5083f95c 100755 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -8,15 +8,17 @@ pub mod types; // ================================ // 修复点 1/2:去掉 simd_*,改为位级转换 // ================================ -#[inline(always)] +#[inline] +#[target_feature(enable = "sve")] pub(crate) unsafe fn simd_reinterpret<T, U>(x: T) -> U { // 纯位级重解释;SVE 封装类型在这层视为opaque,避免走 simd_cast 触发 E0511 core::mem::transmute_copy::<T, U>(&x) } -#[inline(always)] +#[inline] +#[target_feature(enable = "sve")] pub(crate) unsafe fn simd_cast<T, U>(x: T) -> U { - // 多数 SVE “cast”在 stdarch 内部只是布局相同的重解释;按位转即可 + // Most SVE "casts" inside stdarch are just same-layout reinterpretations; a bitwise copy is sufficient // 如需数值语义转换,请在具体 API 内对接相应 LLVM SVE convert 内建。 core::mem::transmute_copy::<T, U>(&x) } @@ -147,30 +149,34 @@ impl __SveSelect for svbool_t { // impl_sve_select!("nxv16f8", svmfloat8_t); // 实现从不同宽度的谓词类型到 svbool_t 的转换 +// Note: these impls call transmute_copy directly and need no target feature, +// since transmute_copy is a pure bit-level conversion and emits no SVE instructions impl From<svbool2_t> for svbool_t { #[inline(always)] fn from(x: svbool2_t) -> Self { - unsafe { simd_cast(x) } + // Bit-level conversion via transmute_copy; no target feature required + unsafe { core::mem::transmute_copy(&x) } } } impl From<svbool4_t> for svbool_t { #[inline(always)] fn from(x: svbool4_t) -> Self { - unsafe { simd_cast(x) } + unsafe { core::mem::transmute_copy(&x) } } } impl From<svbool8_t> for svbool_t { #[inline(always)] fn from(x: svbool8_t) -> Self { - unsafe { simd_cast(x) } + unsafe { core::mem::transmute_copy(&x) } } } // 公开的"选择"总入口:保持原函数签名不变(被 sve/*.rs 调用) // 现在它不再走 simd_select,而是经 trait 静态分派到 LLVM SVE `sel` -#[inline(always)] +#[inline] +#[target_feature(enable = "sve")] pub(crate) unsafe fn simd_select<M, T>(m: M, a: T, b: T) -> T where // SVE 谓词统一为 svbool_t;避免出现 svbool4_t/svbool8_t 这类"假类型" From 85a7a6b5c0457dab462031dff62a32d29828cd94 Mon Sep 17 00:00:00 2001 From: wxh Date: 
Wed, 19 Nov 2025 12:34:09 +0800 Subject: [PATCH 18/27] =?UTF-8?q?11.18=E6=9B=B4=E6=96=B0=E9=83=A8=E5=88=86?= =?UTF-8?q?=E5=87=BD=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/sve.rs | 493 ++++++++++++++++++ 1 file changed, 493 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs index 91e1adc682aa3..1738ac572ae54 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -8442,3 +8442,496 @@ pub unsafe fn svst1_scatter_u64index_u64( svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) } } + +// ============================================================================ +// Additional SVE intrinsics generated based on ARM documentation and test files +// ============================================================================ + +#[doc = "Add across vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda_f16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadda_f16(pg: svbool_t, initial: f16, op: svfloat16_t) -> f16 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fadda.nxv8f16" + )] + fn _svadda_f16(pg: svbool8_t, initial: f16, op: svfloat16_t) -> f16; + } + unsafe { _svadda_f16(simd_cast(pg), initial, op) } +} +#[doc = "Add across vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda_f32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fadda.nxv4f32" + )] + fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32; + } + unsafe { _svadda_f32(simd_cast(pg), initial, op) } +} +#[doc = "Add across vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda_f64)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fadda.nxv2f64" + )] + fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64; + } + unsafe { _svadda_f64(simd_cast(pg), initial, op) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u32base_s32offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrb.nxv4i32" + )] + fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t; + } + unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u32base_u32offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t { + unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u64base_s64offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrb.nxv2i64" + )] + fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t; + } + unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u64base_u64offset)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t { + unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u32base_s32index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrd.nxv4i32" + )] + fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u32base_u32index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u64base_s64index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrd.nxv2i64" + )] + fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u64base_u64index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn 
svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u32base_s32index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrh.nxv4i32" + )] + fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u32base_u32index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u64base_s64index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrh.nxv2i64" + )] + fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u64base_u64index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u32base_s32index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrw.nxv4i32" + )] + fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u32base_u32index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u64base_s64index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.adrw.nxv2i64" + )] + fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Address calculation"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u64base_u64index)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compare equal (wide)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide_s8)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8" + )] + fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpeq_wide_s8(pg, op1, op2) } +} +#[doc = "Compare equal (wide)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide_s16)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16" + )] + fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { simd_cast(_svcmpeq_wide_s16(simd_cast(pg), op1, op2)) } +} +#[doc = "Compare equal (wide)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide_s32)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32" + )] + fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { simd_cast(_svcmpeq_wide_s32(simd_cast(pg), op1, op2)) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8")] + fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_s8(op1, op2) } +} +#[doc = "Saturating subtract"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16")] + fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32")] + fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64")] + fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] +#[inline] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8")] + fn _svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t; + } + unsafe { _svqsub_u8(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16")] + fn _svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t; + } + unsafe { _svqsub_u16(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32")] + fn _svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t; + } + unsafe { _svqsub_u32(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64")] + fn _svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t; + } + unsafe { _svqsub_u64(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64(op1, svdup_n_u64(op2)) +} From bb708a13a8f1d9839270d1557bcabe69352c76a3 Mon Sep 17 00:00:00 2001 From: wxh Date: Wed, 19 Nov 2025 12:36:55 +0800 Subject: [PATCH 19/27] =?UTF-8?q?11.19=E7=AC=AC1=E6=89=B9=E8=A1=A5?= =?UTF-8?q?=E5=85=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/sve.rs | 192 ++++++++++++++++++ 1 file changed, 192 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs index 1738ac572ae54..ff09a14d127dc 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -8935,3 +8935,195 @@ pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { svqsub_u64(op1, svdup_n_u64(op2)) } +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")] + fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsubr_f32_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_z(pg: svbool_t, op1: 
svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")] + fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsubr_f64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")] + fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsubr_s8_m(pg, op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} From ba00c51ea6b68a0befb3503ade8892f00137408c Mon Sep 17 00:00:00 2001 From: wxh Date: Wed, 19 Nov 2025 15:27:40 +0800 Subject: [PATCH 20/27] =?UTF-8?q?11.19=E4=B8=8A=E5=8D=88=E8=A1=A5=E5=85=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/sve.rs | 982 ++++++++++++++++++ 1 file changed, 982 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs index ff09a14d127dc..8c92ed12b14af 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -9127,3 +9127,985 @@ pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { 
svsubr_s8_z(pg, op1, svdup_n_s8(op2)) } +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")] + fn _svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsubr_s16_m(pg, op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")] + fn _svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsubr_s32_m(pg, op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")] + fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsubr_s64_m(simd_cast(pg), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: 
svint64_t) -> svint64_t { + svsubr_s64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + let op1_s: svint8_t = unsafe { core::mem::transmute(op1) }; + let op2_s: svint8_t = unsafe { core::mem::transmute(op2) }; + + let res_s: svint8_t = svsubr_s8_m(pg, op1_s, op2_s); + + unsafe { core::mem::transmute::<svint8_t, svuint8_t>(res_s) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, 
assert_instr(subr))] +pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + let op1_s: svint16_t = unsafe { core::mem::transmute(op1) }; + let op2_s: svint16_t = unsafe { core::mem::transmute(op2) }; + let res_s: svint16_t = svsubr_s16_m(pg, op1_s, op2_s); + unsafe { core::mem::transmute::<svint16_t, svuint16_t>(res_s) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + let op1_s: svint32_t = unsafe { core::mem::transmute(op1) }; + let op2_s: svint32_t = unsafe { core::mem::transmute(op2) }; + let res_s: svint32_t = svsubr_s32_m(pg, op1_s, op2_s); + unsafe { core::mem::transmute::<svint32_t, svuint32_t>(res_s) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + let op1_s: svint64_t = unsafe { core::mem::transmute(op1) }; + let op2_s: svint64_t = unsafe { core::mem::transmute(op2) }; + let res_s: svint64_t = svsubr_s64_m(pg, op1_s, op2_s); + unsafe { core::mem::transmute::<svint64_t, svuint64_t>(res_s) } +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8")] + fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16")] + fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { 
_svqadd_s16(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32")] + fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64")] + fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8")] + fn _svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t; + } + unsafe { _svqadd_u8(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] 
+#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16")] + fn _svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t; + } + unsafe { _svqadd_u16(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32")] + fn _svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t; + } + unsafe { _svqadd_u32(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64")] + fn _svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t; + } + unsafe { _svqadd_u64(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv4f32")] + fn _svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svcompact_f32(pg, op) } +} +#[doc = "Shuffle active elements of vector to the right and 
fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv2f64")] + fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svcompact_f64(simd_cast(pg), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv4i32")] + fn _svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcompact_s32(pg, op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv2i64")] + fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcompact_s64(simd_cast(pg), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + let op_s: svint32_t = unsafe { core::mem::transmute(op) }; + let res_s: svint32_t = svcompact_s32(pg, op_s); + unsafe { core::mem::transmute::(res_s) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + let op_s: svint64_t = unsafe { core::mem::transmute(op) }; + let res_s: svint64_t = svcompact_s64(pg, op_s); + unsafe { core::mem::transmute::(res_s) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")] + fn 
_svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32; + } + unsafe { _svlasta_f32(pg, op) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")] + fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlasta_f64(simd_cast(pg), op) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")] + fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlasta_s8(pg, op) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")] + fn _svlasta_s16(pg: svbool_t, op: svint16_t) -> i16; + } + unsafe { _svlasta_s16(pg, op) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")] + fn _svlasta_s32(pg: svbool_t, op: svint32_t) -> i32; + } + unsafe { _svlasta_s32(pg, op) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")] + fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlasta_s64(simd_cast(pg), op) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { + let op_s: svint8_t = unsafe { core::mem::transmute(op) }; + let res_s: i8 = svlasta_s8(pg, op_s); + unsafe { core::mem::transmute::<i8, u8>(res_s) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { + let op_s: svint16_t = unsafe { core::mem::transmute(op) }; + let res_s: i16 = svlasta_s16(pg, op_s); + unsafe { core::mem::transmute::<i16, u16>(res_s) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { + let op_s: svint32_t = unsafe { core::mem::transmute(op) }; + let res_s: i32 = svlasta_s32(pg, op_s); + unsafe { core::mem::transmute::<i32, u32>(res_s) } +}
+#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { + let op_s: svint64_t = unsafe { core::mem::transmute(op) }; + let res_s: i64 = svlasta_s64(pg, op_s); + unsafe { core::mem::transmute::<i64, u64>(res_s) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")] + fn _svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32; + } + unsafe { _svlastb_f32(pg, op) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")] + fn _svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlastb_f64(simd_cast(pg), op) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")] + fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlastb_s8(pg, op) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")] + fn _svlastb_s16(pg: svbool_t, op: svint16_t) -> i16; + } + unsafe { _svlastb_s16(pg, op) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")] + fn _svlastb_s32(pg: svbool_t, op: svint32_t) -> i32; + } + unsafe { _svlastb_s32(pg, op) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")] + fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlastb_s64(simd_cast(pg), op) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { + let op_s: svint8_t = unsafe { core::mem::transmute(op) }; + let res_s: i8 = svlastb_s8(pg, op_s); + unsafe { core::mem::transmute::<i8, u8>(res_s) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { + let op_s: svint16_t = unsafe { core::mem::transmute(op) }; + let res_s: i16 = svlastb_s16(pg, op_s); + unsafe { core::mem::transmute::<i16, u16>(res_s) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { + let op_s: svint32_t = unsafe { core::mem::transmute(op) }; + let res_s: i32 = svlastb_s32(pg, op_s); + unsafe { core::mem::transmute::<i32, u32>(res_s) } +}
+#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { + let op_s: svint64_t = unsafe { core::mem::transmute(op) }; + let res_s: i64 = svlastb_s64(pg, op_s); + unsafe { core::mem::transmute::<i64, u64>(res_s) } +}
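A minimal usage sketch of the svlasta_*/svlastb_* intrinsics added above (editorial illustration, not part of the patch; it assumes the sve module's types and the functions above are in scope): svlastb_f32 reads the last lane selected by the predicate, and svlasta_f32 reads the lane immediately after it.

    // Sketch only: extract the last predicated element and the one following it.
    #[target_feature(enable = "sve")]
    fn last_and_following(pg: svbool_t, v: svfloat32_t) -> (f32, f32) {
        let last = svlastb_f32(pg, v); // last lane where `pg` is active
        let after = svlasta_f32(pg, v); // lane immediately after the last active one
        (last, after)
    }

From 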
43973b228f56cdba001b39ef29bbc46ac31905a1 Mon Sep 17 00:00:00 2001 From: wxh Date: Wed, 19 Nov 2025 16:06:07 +0800 Subject: [PATCH 21/27] =?UTF-8?q?11.19=E7=AC=AC2=E6=89=B9=E8=A1=A5?= =?UTF-8?q?=E5=85=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/sve.rs | 210 ++++++++++++++++++ .../crates/core_arch/src/aarch64/sve/types.rs | 96 ++++---- 2 files changed, 258 insertions(+), 48 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs index 8c92ed12b14af..f34073938ddae 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -4764,6 +4764,216 @@ pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { svmul_u64_z(pg, op1, svdup_n_u64(op2)) } +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32" + )] + fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpe_f32(op) } +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64" + )] + fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpe_f64(op) } +} +#[doc = "Reciprocal step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv4f32" + )] + fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecps_f32(op1, op2) } +} +#[doc = "Reciprocal step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv2f64" + )] + fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecps_f64(op1, op2) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32" + )] + fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpx_f32_m(inactive, simd_cast(pg), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv2f64" + )] + fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpx_f64_m(inactive, simd_cast(pg), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32" + )] + fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrte_f32(op) } +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64" + )] + fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrte_f64(op) } +} +#[doc = "Reciprocal square root step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32" + )] + fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrts_f32(op1, op2) } +} +#[doc = "Reciprocal square root step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64" + )] + fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrts_f64(op1, op2) } +} #[doc = "Bitwise inclusive OR"] #[doc = ""] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs index 74515b1922573..a3dd5936d13a1 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs @@ -9,8 +9,8 @@ use super::simd_cast; /// SVE谓词类型 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(1)] +#[repr(C)] pub struct svbool_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -22,8 +22,8 @@ impl Clone for svbool_t { /// SVE双宽度谓词类型 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(2)] +#[repr(C)] pub struct svbool2_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -35,8 +35,8 @@ impl Clone for svbool2_t { /// SVE四宽度谓词类型 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(4)] +#[repr(C)] pub struct svbool4_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -48,8 +48,8 @@ impl Clone for svbool4_t { /// SVE八宽度谓词类型 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svbool8_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -65,8 +65,8 @@ impl Clone for svbool8_t { /// SVE 8位有符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svint8_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -78,8 +78,8 @@ impl Clone 
for svint8_t { /// SVE 16位有符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svint16_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -91,8 +91,8 @@ impl Clone for svint16_t { /// SVE 32位有符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(4)] +#[repr(C)] pub struct svint32_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -104,8 +104,8 @@ impl Clone for svint32_t { /// SVE 64位有符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(2)] +#[repr(C)] pub struct svint64_t(i64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -117,8 +117,8 @@ impl Clone for svint64_t { /// SVE 8位无符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svuint8_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -130,8 +130,8 @@ impl Clone for svuint8_t { /// SVE 16位无符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svuint16_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -143,8 +143,8 @@ impl Clone for svuint16_t { /// SVE 32位无符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(4)] +#[repr(C)] pub struct svuint32_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -156,8 +156,8 @@ impl Clone for svuint32_t { /// SVE 64位无符号整数向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(2)] +#[repr(C)] pub struct svuint64_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -169,8 +169,8 @@ impl Clone for svuint64_t { /// SVE 32位浮点向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(4)] +#[repr(C)] pub struct svfloat32_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -182,8 +182,8 @@ impl Clone for svfloat32_t { /// SVE 64位浮点向量 #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(2)] +#[repr(C)] pub struct svfloat64_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -195,8 +195,8 @@ impl Clone for svfloat64_t { /// SVE 16位浮点向量 (使用 f32 作为底层类型) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svfloat16_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -212,8 +212,8 @@ impl Clone for svfloat16_t { /// SVE 8位有符号整数双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(32)] +#[repr(C)] pub struct svint8x2_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -225,8 +225,8 @@ impl Clone for svint8x2_t { /// SVE 8位无符号整数双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(32)] +#[repr(C)] pub struct svuint8x2_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -238,8 +238,8 @@ impl Clone for svuint8x2_t { /// SVE 16位有符号整数双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svint16x2_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -251,8 +251,8 @@ impl Clone for svint16x2_t { /// SVE 16位无符号整数双向量 
(x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svuint16x2_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -264,8 +264,8 @@ impl Clone for svuint16x2_t { /// SVE 32位浮点双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svfloat32x2_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -277,8 +277,8 @@ impl Clone for svfloat32x2_t { /// SVE 32位有符号整数双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svint32x2_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -290,8 +290,8 @@ impl Clone for svint32x2_t { /// SVE 32位无符号整数双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svuint32x2_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -303,8 +303,8 @@ impl Clone for svuint32x2_t { /// SVE 64位浮点双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(4)] +#[repr(C)] pub struct svfloat64x2_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -316,8 +316,8 @@ impl Clone for svfloat64x2_t { /// SVE 64位有符号整数双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(4)] +#[repr(C)] pub struct svint64x2_t(i64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -329,8 +329,8 @@ impl Clone for svint64x2_t { /// SVE 64位无符号整数双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(4)] +#[repr(C)] pub struct svuint64x2_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -342,8 +342,8 @@ impl Clone for svuint64x2_t { /// SVE 8位有符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(48)] +#[repr(C)] pub struct svint8x3_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -355,8 +355,8 @@ impl Clone for svint8x3_t { /// SVE 8位无符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(48)] +#[repr(C)] pub struct svuint8x3_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -368,8 +368,8 @@ impl Clone for svuint8x3_t { /// SVE 16位有符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(24)] +#[repr(C)] pub struct svint16x3_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -381,8 +381,8 @@ impl Clone for svint16x3_t { /// SVE 16位无符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(24)] +#[repr(C)] pub struct svuint16x3_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -394,8 +394,8 @@ impl Clone for svuint16x3_t { /// SVE 32位浮点三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(12)] +#[repr(C)] pub struct svfloat32x3_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -407,8 +407,8 @@ impl Clone for svfloat32x3_t { /// SVE 32位有符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(12)] +#[repr(C)] pub struct svint32x3_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -420,8 
+420,8 @@ impl Clone for svint32x3_t { /// SVE 32位无符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(12)] +#[repr(C)] pub struct svuint32x3_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -433,8 +433,8 @@ impl Clone for svuint32x3_t { /// SVE 64位浮点三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(6)] +#[repr(C)] pub struct svfloat64x3_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -446,8 +446,8 @@ impl Clone for svfloat64x3_t { /// SVE 64位有符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(6)] +#[repr(C)] pub struct svint64x3_t(i64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -459,8 +459,8 @@ impl Clone for svint64x3_t { /// SVE 64位无符号整数三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(6)] +#[repr(C)] pub struct svuint64x3_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -472,8 +472,8 @@ impl Clone for svuint64x3_t { /// SVE 8位有符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(64)] +#[repr(C)] pub struct svint8x4_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -485,8 +485,8 @@ impl Clone for svint8x4_t { /// SVE 8位无符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(64)] +#[repr(C)] pub struct svuint8x4_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -498,8 +498,8 @@ impl Clone for svuint8x4_t { /// SVE 16位有符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(32)] +#[repr(C)] pub struct svint16x4_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -511,8 +511,8 @@ impl Clone for svint16x4_t { /// SVE 16位无符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(32)] +#[repr(C)] pub struct svuint16x4_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -524,8 +524,8 @@ impl Clone for svuint16x4_t { /// SVE 32位浮点四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svfloat32x4_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -537,8 +537,8 @@ impl Clone for svfloat32x4_t { /// SVE 32位有符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svint32x4_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -550,8 +550,8 @@ impl Clone for svint32x4_t { /// SVE 32位无符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svuint32x4_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -563,8 +563,8 @@ impl Clone for svuint32x4_t { /// SVE 64位浮点四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svfloat64x4_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -576,8 +576,8 @@ impl Clone for svfloat64x4_t { /// SVE 64位有符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svint64x4_t(i64); 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -589,8 +589,8 @@ impl Clone for svint64x4_t { /// SVE 64位无符号整数四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(8)] +#[repr(C)] pub struct svuint64x4_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
@@ -602,8 +602,8 @@ impl Clone for svuint64x4_t { /// SVE 16位浮点双向量 (x2) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(16)] +#[repr(C)] pub struct svfloat16x2_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
@@ -615,8 +615,8 @@ impl Clone for svfloat16x2_t { /// SVE 16位浮点三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(24)] +#[repr(C)] pub struct svfloat16x3_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
@@ -628,8 +628,8 @@ impl Clone for svfloat16x3_t { /// SVE 16位浮点四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[repr(C)] #[rustc_scalable_vector(32)] +#[repr(C)] pub struct svfloat16x4_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
From 593cf1fb80b3d62bfafc6000b2cb01c83b3373fb Mon Sep 17 00:00:00 2001 From: wxh Date: Thu, 20 Nov 2025 12:09:58 +0800 Subject: =?UTF-8?q?11.20=E4=B8=8A=E5=8D=88=E8=A1=A5=E5=85=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/sve.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs index f34073938ddae..558d2ece14655 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -814,6 +814,20 @@ pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise AND"] #[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nxv16i1")] + fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svand_b_z(pg, op1, op2) } +}
+#[doc = "Bitwise AND"] +#[doc = ""] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] #[inline] #[target_feature(enable = "sve")]
From 0ea7bce8a11547e578a188ee438cd9b2d982c12c Mon Sep 17 00:00:00 2001 From: wxh Date: Thu, 20 Nov 2025 17:18:48 +0800 Subject: =?UTF-8?q?=E5=9C=A8=E6=A0=87=E5=87=86=E5=BA=93?= =?UTF-8?q?=E4=B8=AD=E9=9B=86=E6=88=90=E4=BA=86=E4=BB=A3=E7=A0=81=E7=94=9F?= =?UTF-8?q?=E6=88=90=E5=99=A8=E7=9A=84sve.rs=EF=BC=8C=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E4=BA=86=E6=9B=B4=E5=A4=9AIntrinsics?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../crates/core_arch/src/aarch64/sve/mod.rs | 7 +- .../crates/core_arch/src/aarch64/sve/sve.rs | 48701 ++++++++++++++-- .../crates/core_arch/src/aarch64/sve/types.rs | 98 + 3 files changed, 42331 insertions(+), 6475 deletions(-)
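The rewritten sve.rs in this patch converts the general predicate with calls such as pg.into() before handing it to width-specific LLVM intrinsics. The conversion impls live in the types.rs changes, which are not reproduced in this excerpt; the sketch below is a hypothetical illustration of their likely shape (the From impl and the use of simd_cast are assumptions, not taken from the patch, and simd_cast is assumed to be in scope as elsewhere in this module).

    // Hypothetical: one of the predicate-narrowing conversions that `pg.into()` relies on.
    impl From<svbool_t> for svbool4_t {
        fn from(pg: svbool_t) -> Self {
            // Converts the general predicate to the narrower predicate type,
            // mirroring the explicit simd_cast(pg) calls used elsewhere in this module.
            unsafe { simd_cast(pg) }
        }
    }

diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs 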
b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs index afe6e5083f95c..9ae6aeb29769b 100755 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -309,17 +309,12 @@ pub use types::*; // a) 外部内建 unsafe extern "C" { - #[link_name = "llvm.aarch64.sve.cntw"] - fn __llvm_sve_cntw() -> i32; - #[link_name = "llvm.aarch64.sve.whilelt"] fn __llvm_sve_whilelt_i32(i: i32, n: i32) -> svbool_t; } // b) 对外 API -#[inline] -#[target_feature(enable = "sve")] -pub unsafe fn svcntw() -> i32 { __llvm_sve_cntw() } +// 注意:svcntw() 函数在 sve.rs 中定义,使用正确的 LLVM 内建函数签名 #[inline] #[target_feature(enable = "sve")] diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs index 558d2ece14655..73ea1441edb32 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve.rs @@ -11,10325 +11,46088 @@ use stdarch_test::assert_instr; use super::*; +use crate::core_arch::arch::aarch64::*; -#[doc = "Absolute compare greater than or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")] - fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] + fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { simd_cast(_svacge_f32(simd_cast(pg), op1, op2)) } + unsafe { _svabd_f32_m(pg.into(), op1, op2) } } -#[doc = "Absolute compare greater than or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svacge_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_m(pg, op1, svdup_n_f32(op2)) } -#[doc = "Absolute compare greater than or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { 
- unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")] - fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svacge_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, op1, op2) } -#[doc = "Absolute compare greater than or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svacge_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_x(pg, op1, svdup_n_f32(op2)) } -#[doc = "Absolute compare greater than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")] - fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; - } - unsafe { simd_cast(_svacgt_f32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } -#[doc = "Absolute compare greater than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svacgt_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_z(pg, op1, svdup_n_f32(op2)) } -#[doc = "Absolute compare greater than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: 
svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")] - fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] + fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { simd_cast(_svacgt_f64(simd_cast(pg), op1, op2)) } + unsafe { _svabd_f64_m(pg.into(), op1, op2) } } -#[doc = "Absolute compare greater than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svacgt_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_m(pg, op1, svdup_n_f64(op2)) } -#[doc = "Absolute compare less than or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - svacge_f32(pg, op2, op1) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, op1, op2) } -#[doc = "Absolute compare less than or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svacle_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_x(pg, op1, svdup_n_f64(op2)) } -#[doc = "Absolute compare less than or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - svacge_f64(pg, op2, op1) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } -#[doc = "Absolute compare less than 
or equal to"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facge))] -pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svacle_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_z(pg, op1, svdup_n_f64(op2)) } -#[doc = "Absolute compare less than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - svacgt_f32(pg, op2, op1) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] + fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_s8_m(pg, op1, op2) } } -#[doc = "Absolute compare less than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svaclt_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Absolute compare less than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - svacgt_f64(pg, op2, op1) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, op1, op2) } -#[doc = "Absolute compare less than"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(facgt))] -pub 
fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svaclt_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")] - fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svadd_f32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svadd_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svadd_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] + fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_s16_m(pg.into(), op1, op2) } } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svadd_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_m(pg, op1, 
svdup_n_s16(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, op1, op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svadd_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")] - fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svadd_f64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svadd_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = 
"none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svadd_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] + fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_s32_m(pg.into(), op1, op2) } } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svadd_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, op1, op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fadd))] -pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svadd_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")] - fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] + fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svadd_s8_m(pg, op1, op2) } + unsafe { _svabd_s64_m(pg.into(), op1, op2) } } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svadd_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svadd_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, op1, op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svadd_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svadd_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")] - fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] + fn _svabd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svadd_s16_m(simd_cast(pg), op1, op2) } + unsafe { _svabd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svadd_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svadd_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_x(pg: svbool_t, op1: 
svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, op1, op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svadd_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svadd_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")] - fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] + fn _svabd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svadd_s32_m(simd_cast(pg), op1, op2) } + unsafe { _svabd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svadd_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svadd_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, op1, op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svadd_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svadd_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc 
= "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")] - fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] + fn _svabd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { _svadd_s64_m(simd_cast(pg), op1, op2) } + unsafe { _svabd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svadd_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svadd_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, op1, op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svadd_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svadd_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] + fn _svabd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svadd_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svadd_u8_m(pg, op1, op2) 
+#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, op1, op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svadd_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Add"] +#[doc = "Absolute difference"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svadd_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] + fn _svabs_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabs_f32_m(inactive, pg.into(), op) } } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svadd_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(op, pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svadd_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(svdup_n_f32(0.0), pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svadd_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] + fn _svabs_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabs_f64_m(inactive, pg.into(), op) } } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(op, pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svadd_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(svdup_n_f64(0.0), pg, op) } -#[doc = 
"Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] + fn _svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svabs_s8_m(inactive, pg, op) } } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svadd_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(op, pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svadd_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(svdup_n_s8(0), pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svadd_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] + fn _svabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svabs_s16_m(inactive, pg.into(), op) } } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] 
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(op, pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svadd_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(svdup_n_s16(0), pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] + fn _svabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svabs_s32_m(inactive, pg.into(), op) } } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svadd_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(op, pg, op) } -#[doc = "Add"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svadd_u64_m(pg, op1, op2) -} -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, 
assert_instr(add))] -pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svadd_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(svdup_n_s32(0), pg, op) } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] + fn _svabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svabs_s64_m(inactive, pg.into(), op) } } -#[doc = "Add"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(add))] -pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svadd_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(op, pg, op) } -#[doc = "Bitwise AND"] +#[doc = "Absolute value"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nvx16i1")] - fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; - } - unsafe { _svand_b_z(pg, op1, op2) } +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(svdup_n_s64(0), pg, op) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.and.nxv16i8")] - fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")] + fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; } - unsafe { _svand_s8_m(pg, op1, op2) } + unsafe { _svacge_f32(pg.into(), op1, op2).into() } } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svand_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacge_f32(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svand_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")] + fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacge_f64(pg.into(), op1, op2).into() } } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svand_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacge_f64(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")] + fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacgt_f32(pg.into(), op1, op2).into() } } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svand_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacgt_f32(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")] - fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")] + fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; } - unsafe { _svand_s16_m(simd_cast(pg), op1, op2) } + unsafe { _svacgt_f64(pg.into(), op1, op2).into() } } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare greater than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svand_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacgt_f64(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svand_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacge_f32(pg, op2, op1) } -#[doc = "Bitwise AND"] +#[doc = "Absolute 
compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svand_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacle_f32(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacge_f64(pg, op2, op1) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svand_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacle_f64(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")] - fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svand_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacgt_f32(pg, op2, op1) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svand_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svaclt_f32(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svand_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacgt_f64(pg, op2, op1) } -#[doc = "Bitwise AND"] +#[doc = "Absolute compare less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svand_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svaclt_f64(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")] + fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svadd_f32_m(pg.into(), op1, op2) } } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svand_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_m(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")] - fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; - } - unsafe { _svand_s64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, op1, op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svand_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_x(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svand_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svand_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_z(pg, op1, svdup_n_f32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svand_s64_m(pg, svsel_s64(pg, op1, 
svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")] + fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svadd_f64_m(pg.into(), op1, op2) } } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svand_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_m(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, op1, op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svand_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_x(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svand_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svand_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_z(pg, op1, svdup_n_f64(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")] + fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svadd_s8_m(pg, op1, op2) } } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svand_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, op1, op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svand_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_x(pg, op1, 
svdup_n_s8(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svand_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svand_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")] + fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svadd_s16_m(pg.into(), op1, op2) } } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svand_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u32_m(pg: svbool_t, op1: 
svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, op1, op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svand_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svand_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svand_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")] + fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svadd_s32_m(pg.into(), op1, op2) } } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svand_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } -} -#[doc = "Bitwise AND"] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, op1, op2) +} +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svand_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svand_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svand_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")] + fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svadd_s64_m(pg.into(), op1, op2) } } -#[doc = "Bitwise AND"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(and))] -pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svand_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.z.nvx16i1")] - fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; - } - unsafe { _svbic_b_z(pg, op1, op2) } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, op1, op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")] - fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svbic_s8_m(pg, op1, op2) } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svbic_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svbic_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svbic_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svbic_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, op1, op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")] - fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svbic_s16_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svbic_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svbic_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svbic_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_m(pg: 
svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svbic_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, op1, op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")] - fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svbic_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svbic_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svbic_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svbic_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svbic_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, op1, op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")] - fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; - } - unsafe { _svbic_s64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svbic_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svbic_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svbic_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svbic_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, op1, op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_x(pg, op1, 
svdup_n_u64(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svbic_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Bitwise clear"] +#[doc = "Add"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svbic_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Bitwise clear"] +#[doc = "Add reduction (strictly-ordered)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svbic_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv4f32")] + fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32; + } + unsafe { _svadda_f32(pg.into(), initial, op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction (strictly-ordered)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv2f64")] + fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64; + } + unsafe { _svadda_f64(pg.into(), initial, op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svbic_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] + fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svaddv_f32(pg.into(), op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] + fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svaddv_f64(pg.into(), op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svbic_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv2i64")] + fn _svaddv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_s64(pg.into(), op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svbic_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv2i64")] + fn _svaddv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_u64(pg.into(), op.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svbic_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv16i8")] + fn _svaddv_s8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_s8(pg, op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv8i16")] + fn _svaddv_s16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_s16(pg.into(), op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svbic_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv4i32")] + fn _svaddv_s32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_s32(pg.into(), op) } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv16i8")] + fn _svaddv_u8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_u8(pg, op.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svbic_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv8i16")] + fn _svaddv_u16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_u16(pg.into(), op.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Add reduction"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svbic_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv4i32")] + fn _svaddv_u32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_u32(pg.into(), op.as_signed()).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 8-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svbic_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv4i32")] + fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t; + } + unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 16-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv4i32")] + fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { 
_svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 32-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svbic_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv4i32")] + fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 64-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv4i32")] + fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 8-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svbic_u64_m(pg, op1, svdup_n_u64(op2)) -} -#[doc = "Bitwise clear"] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t { + unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svbic_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(adr))] 
+pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 32-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svbic_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 64-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) } } -#[doc = "Bitwise clear"] +#[doc = "Compute vector addresses for 8-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(bic))] -pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svbic_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv2i64")] + fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t; + } + unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() } } -#[doc = "Break after first true condition"] -#[doc = ""] -#[doc = "Break after first true condition"] +#[doc = "Compute vector addresses for 16-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(brka))] -pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { unsafe 
extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")] - fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv2i64")] + fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; } - unsafe { _svbrka_b_m(inactive, pg, op) } + unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() } } -#[doc = "Break after first true condition"] +#[doc = "Compute vector addresses for 32-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(brka))] -pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")] - fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv2i64")] + fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; } - unsafe { _svbrka_b_z(pg, op) } + unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() } } -#[doc = "Break before first true condition"] +#[doc = "Compute vector addresses for 64-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(brkb))] -pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")] - fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv2i64")] + fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; } - unsafe { _svbrkb_b_m(inactive, pg, op) } + unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() } } -#[doc = "Break before first true condition"] +#[doc = "Compute vector addresses for 8-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(brkb))] -pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")] - fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; - } - unsafe { _svbrkb_b_z(pg, op) } 
+#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t { + unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) } } -#[doc = "Propagate break to next partition"] +#[doc = "Compute vector addresses for 16-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(brkn))] -pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")] - fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; - } - unsafe { _svbrkn_b_z(pg, op1, op2) } +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) } } -#[doc = "Break after first true condition, propagating from previous partition"] +#[doc = "Compute vector addresses for 32-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(brkpa))] -pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1" - )] - fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; - } - unsafe { _svbrkpa_b_z(pg, op1, op2) } +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) } } -#[doc = "Break before first true condition, propagating from previous partition"] +#[doc = "Compute vector addresses for 64-bit data"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(brkpb))] -pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Bitwise AND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1" - )] - fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> 
svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nvx16i1")] + fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; } - unsafe { _svbrkpb_b_z(pg, op1, op2) } + unsafe { _svand_b_z(pg, op1, op2) } } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f32_m( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, -) -> svfloat32_t { - static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")] - fn _svcadd_f32_m( - pg: svbool4_t, - op1: svfloat32_t, - op2: svfloat32_t, - imm_rotation: i32, - ) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv16i8")] + fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svcadd_f32_m(simd_cast(pg), op1, op2, IMM_ROTATION) } + unsafe { _svand_s8_m(pg, op1, op2) } } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f32_x( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, -) -> svfloat32_t { - svcadd_f32_m::(pg, op1, op2) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f32_z( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, -) -> svfloat32_t { - svcadd_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, op1, op2) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f64_m( - pg: svbool_t, - op1: svfloat64_t, - op2: 
svfloat64_t, -) -> svfloat64_t { - static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")] - fn _svcadd_f64_m( - pg: svbool2_t, - op1: svfloat64_t, - op2: svfloat64_t, - imm_rotation: i32, - ) -> svfloat64_t; - } - unsafe { _svcadd_f64_m(simd_cast(pg), op1, op2, IMM_ROTATION) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f64_x( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, -) -> svfloat64_t { - svcadd_f64_m::(pg, op1, op2) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Complex add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] -pub fn svcadd_f64_z( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, -) -> svfloat64_t { - svcadd_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f32_m( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - static_assert!( - IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 - ); +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")] - fn _svcmla_f32_m( - pg: svbool4_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, - imm_rotation: i32, - ) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")] + fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svcmla_f32_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) } + unsafe { _svand_s16_m(pg.into(), op1, op2) } } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f32_x( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - svcmla_f32_m::(pg, op1, op2, op3) -} -#[doc = "Complex multiply-add with rotate"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f32_z( - pg: svbool_t, - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - svcmla_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f64_m( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, -) -> svfloat64_t { - static_assert!( - IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 - ); - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")] - fn _svcmla_f64_m( - pg: svbool2_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, - imm_rotation: i32, - ) -> svfloat64_t; - } - unsafe { _svcmla_f64_m(simd_cast(pg), op1, op2, op3, IMM_ROTATION) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, op1, op2) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f64_x( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, -) -> svfloat64_t { - svcmla_f64_m::(pg, op1, op2, op3) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] #[inline] #[target_feature(enable 
= "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] -pub fn svcmla_f64_z( - pg: svbool_t, - op1: svfloat64_t, - op2: svfloat64_t, - op3: svfloat64_t, -) -> svfloat64_t { - svcmla_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Complex multiply-add with rotate"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))] -pub fn svcmla_lane_f32( - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, -) -> svfloat32_t { - static_assert_range!(IMM_INDEX, 0, 1); - static_assert!( - IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 - ); - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32" - )] - fn _svcmla_lane_f32( - op1: svfloat32_t, - op2: svfloat32_t, - op3: svfloat32_t, - imm_index: i32, - imm_rotation: i32, - ) -> svfloat32_t; - } - unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")] - fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")] + fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { simd_cast(_svcmpeq_f32(simd_cast(pg), op1, op2)) } + unsafe { _svand_s32_m(pg.into(), op1, op2) } } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmpeq_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + 
svand_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")] - fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpeq_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, op1, op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmeq))] -pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmpeq_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] - fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; - } - unsafe { _svcmpeq_s8(pg, op1, op2) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] - fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")] + fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { simd_cast(_svcmpeq_s16(simd_cast(pg), op1, op2)) } + unsafe { _svand_s64_m(pg.into(), op1, op2) } } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] - fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpeq_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, op1, op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] - fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpeq_s64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmpeq_s16(pg, op1.as_signed(), 
op2.as_signed()) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, op1, op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare equal to"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", 
issue = "none")] -#[cfg_attr(test, assert_instr(cmpeq))] -pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] - fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpgt_f32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, op1, op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] - fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpgt_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmpgt_f64(pg, op1, svdup_n_f64(op2)) 
+#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] - fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; - } - unsafe { _svcmpgt_s8(pg, op1, op2) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] - fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; - } - unsafe { simd_cast(_svcmpgt_s16(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, op1, op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] 
#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] - fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpgt_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] - fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpgt_s64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpgt_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, op1, op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmpgt_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpgt_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.andv.nxv16i8")] + fn _svandv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svandv_s8(pg, op) } } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv8i16")] + fn _svandv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svandv_s16(pg.into(), op) } } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpgt_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv4i32")] + fn _svandv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svandv_s32(pg.into(), op) } } -#[doc = "Compare greater than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv2i64")] + fn _svandv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svandv_s64(pg.into(), op) } } -#[doc = "Compare less than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - svcmpgt_f32(pg, op2, op1) +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svandv_s8(pg, op.as_signed()).as_unsigned() } } 
-#[doc = "Compare less than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmplt_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svandv_s16(pg, op.as_signed()).as_unsigned() } } -#[doc = "Compare less than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - svcmpgt_f64(pg, op2, op1) +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svandv_s32(pg, op.as_signed()).as_unsigned() } } -#[doc = "Compare less than"] +#[doc = "Bitwise AND reduction to scalar"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmgt))] -pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmplt_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svandv_s64(pg, op.as_signed()).as_unsigned() } } - -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmne))] -pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")] - fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv16i8")] + fn _svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { simd_cast(_svcmpne_f32(simd_cast(pg), op1, op2)) } + unsafe { _svasr_s8_m(pg, op1, op2.as_signed()) } } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmne))] -pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmpne_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmne))] -pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] - fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpne_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, op1, op2) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmne))] -pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmpne_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] - fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; - } - unsafe { _svcmpne_s8(pg, op1, op2) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = 
"none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpne_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] - fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv8i16")] + fn _svasr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { simd_cast(_svcmpne_s16(simd_cast(pg), op1, op2)) } + unsafe { _svasr_s16_m(pg.into(), op1, op2.as_signed()) } } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpne_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] - fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpne_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, op1, op2) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpne_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] - fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpne_s64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpne_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv4i32")] + fn _svasr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svasr_s32_m(pg.into(), op1, op2.as_signed()) } } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpne_u8(pg, op1, 
svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, op1, op2) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmpne_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpne_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] 
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv2i64")] + fn _svasr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svasr_s64_m(pg.into(), op1, op2.as_signed()) } } -#[doc = "Compare not equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpne))] -pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpne_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_m(pg, op1, svdup_n_u64(op2)) } - -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")] - fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpge_f32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, op1, op2) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmpge_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn 
svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")] - fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; - } - unsafe { simd_cast(_svcmpge_f64(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmge))] -pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmpge_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] - fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv16i8" + )] + fn _svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; } - unsafe { _svcmpge_s8(pg, op1, op2) } + unsafe { _svasr_wide_s8_m(pg, op1, op2.as_signed()) } } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmpge_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] - fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; - } - unsafe { simd_cast(_svcmpge_s16(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, op1, op2) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmpge_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] - fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmpge_s32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmpge_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, 
assert_instr(cmpge))] -pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")] - fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv8i16" + )] + fn _svasr_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; } - unsafe { simd_cast(_svcmpge_s64(simd_cast(pg), op1, op2)) } + unsafe { _svasr_wide_s16_m(pg.into(), op1, op2.as_signed()) } } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpge))] -pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmpge_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmpge_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, op1, op2) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmpge_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: 
svuint16_t) -> svbool_t { - unsafe { svcmpge_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmpge_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmpge_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv4i32" + )] + fn _svasr_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svasr_wide_s32_m(pg.into(), op1, op2.as_signed()) } } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmpge_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmpge_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: 
svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, op1, op2) } -#[doc = "Compare greater than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmhs))] -pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmpge_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_x(pg, op1, svdup_n_u64(op2)) } - -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv4f32")] - fn _svcmple_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; - } - unsafe { simd_cast(_svcmple_f32(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { - svcmple_f32(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmple.nxv2f64")] - fn _svcmple_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.asrd.nxv16i8")] + fn _svasrd_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; } - unsafe { simd_cast(_svcmple_f64(simd_cast(pg), op1, op2)) } + unsafe { _svasrd_n_s8_m(pg, op1, IMM2) } } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fcmle))] -pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { - svcmple_f64(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::<IMM2>(pg, op1) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv16i8")] - fn _svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; - } - unsafe { _svcmple_s8(pg, op1, op2) } +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0))) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmple_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv8i16")] + fn _svasrd_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svasrd_n_s16_m(pg.into(), op1, IMM2) } } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - unsafe 
extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv8i16")] - fn _svcmple_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; - } - unsafe { simd_cast(_svcmple_s16(simd_cast(pg), op1, op2)) } +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::<IMM2>(pg, op1) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmple_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0))) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv4i32")] - fn _svcmple_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv4i32")] + fn _svasrd_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; } - unsafe { simd_cast(_svcmple_s32(simd_cast(pg), op1, op2)) } + unsafe { _svasrd_n_s32_m(pg.into(), op1, IMM2) } } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmple_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::<IMM2>(pg, op1) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1, 64); unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmple.nxv2i64")] - fn _svcmple_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv2i64")] + fn _svasrd_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; } - unsafe { simd_cast(_svcmple_s64(simd_cast(pg), op1, op2)) } + unsafe { _svasrd_n_s64_m(pg.into(), op1, IMM2) } } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmple))] -pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmple_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::<IMM2>(pg, op1) } -#[doc = "Compare less than or equal to"] +#[doc = "Arithmetic shift right for divide by immediate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - unsafe { svcmple_s8(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0))) } -#[doc = "Compare less than or equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmple_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.bic.z.nxv16i1")] + fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbic_b_z(pg, op1, op2) } } -#[doc = "Compare less than or equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - unsafe { svcmple_s16(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")] + fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbic_s8_m(pg, op1, op2) } } -#[doc = "Compare less than or equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmple_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Compare less than or equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - unsafe { svcmple_s32(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, op1, op2) } -#[doc = "Compare less than or equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmple_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Compare less than or equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - unsafe { svcmple_s64(pg, op1.as_signed(), op2.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Compare less than or equal to"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmls))] -pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmple_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { - svcmpgt_s8(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")] + fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbic_s16_m(pg.into(), op1, op2) } } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { - svcmplt_s8(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { - svcmpgt_s16(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_x(pg: svbool_t, op1: 
svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, op1, op2) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { - svcmplt_s16(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { - svcmpgt_s32(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { - svcmplt_s32(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { - svcmpgt_s64(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")] + fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbic_s32_m(pg.into(), op1, op2) } } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmpgt))] -pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { - svcmplt_s64(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { - svcmpgt_u8(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, op1, op2) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { - svcmplt_u8(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { - svcmpgt_u16(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { - svcmplt_u16(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] 
#[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { - svcmpgt_u32(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")] + fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbic_s64_m(pg.into(), op1, op2) } } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { - svcmplt_u32(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { - svcmpgt_u64(pg, op2, op1) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, op1, op2) } -#[doc = "Compare less than"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cmphi))] -pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { - svcmplt_u64(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" - )] - fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; - } - unsafe { 
_svcvt_f32_s32_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { - unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { - svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] - fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; - } - unsafe { _svcvt_f32_s64_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { - unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, op1, op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { - svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" - )] - fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; - } - unsafe { _svcvt_f32_u32_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { - unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { - svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] - fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; - } - unsafe { _svcvt_f32_u64_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { - unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, op1, op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { - svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" - )] - fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_s32_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub 
fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { - unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { - svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" - )] - fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_s64_m(inactive, simd_cast(pg), op) } -} -#[doc = "Floating-point convert"] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { - unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, op1, op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(scvtf))] -pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { - svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Floating-point convert"] 
+#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" - )] - fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_u32_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { - unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { - svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" - )] - fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; - } - unsafe { _svcvt_f64_u64_m(inactive, simd_cast(pg), op.as_signed()) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Floating-point convert"] +#[doc 
= "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { - unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, op1, op2) } -#[doc = "Floating-point convert"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ucvtf))] -pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { - svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_f32(op: f32) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] - fn _svdup_n_f32(op: f32) -> svfloat32_t; - } - unsafe { _svdup_n_f32(op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Broadcast a scalar value"] +#[doc = "Bitwise clear"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_f64(op: f64) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] - fn _svdup_n_f64(op: f64) -> svfloat64_t; - } - unsafe { _svdup_n_f64(op) } +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Broadcast a scalar value"] +#[doc = "Break after first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s8(op: i8) -> svint8_t { +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] - fn _svdup_n_s8(op: i8) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")] + fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; } - unsafe { _svdup_n_s8(op) } + unsafe { _svbrka_b_m(inactive, pg, op) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Break after first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s16(op: i16) -> svint16_t { +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] - fn _svdup_n_s16(op: i16) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")] + fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; } - unsafe { _svdup_n_s16(op) } + unsafe { _svbrka_b_z(pg, op) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Break before first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s32(op: i32) -> svint32_t { +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] - fn _svdup_n_s32(op: i32) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")] + fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; } - unsafe { _svdup_n_s32(op) } + unsafe { _svbrkb_b_m(inactive, pg, op) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Break before first true condition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_s64(op: i64) -> svint64_t { +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] - fn _svdup_n_s64(op: i64) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")] + fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; } - unsafe { _svdup_n_s64(op) } + 
unsafe { _svbrkb_b_z(pg, op) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Propagate break to next partition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u8(op: u8) -> svuint8_t { - unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(brkn))] +pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")] + fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkn_b_z(pg, op1, op2) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Break after first true condition, propagating from previous partition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u16(op: u16) -> svuint16_t { - unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(brkpa))] +pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1" + )] + fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkpa_b_z(pg, op1, op2) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Break before first true condition, propagating from previous partition"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u32(op: u32) -> svuint32_t { - unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(brkpb))] +pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1" + )] + fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkpb_b_z(pg, op1, op2) } } -#[doc = "Broadcast a scalar value"] +#[doc = "Complex add with rotate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mov))] -pub fn svdup_n_u64(op: u64) -> svuint64_t { - unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_m<const IMM_ROTATION: i32>( + pg: svbool_t, + op1: svfloat32_t,
+ op2: svfloat32_t, +) -> svfloat32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")] + fn _svcadd_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcadd_f32_m(pg.into(), op1, op2, IMM_ROTATION) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = "Complex add with rotate"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] - fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; - } - _svld1_f32(simd_cast(pg), base) +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_x<const IMM_ROTATION: i32>( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::<IMM_ROTATION>(pg, op1, op2) } -#[doc = "Unextended load"] +#[doc = "Complex add with rotate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_z<const IMM_ROTATION: i32>( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Complex add with rotate"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_m<const IMM_ROTATION: i32>( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] - fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")] + fn _svcadd_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; } - _svld1_f64(simd_cast(pg), base) + unsafe { _svcadd_f64_m(pg.into(), op1,
op2, IMM_ROTATION) } } -#[doc = "Unextended load"] +#[doc = "Complex add with rotate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_x<const IMM_ROTATION: i32>( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::<IMM_ROTATION>(pg, op1, op2) +} +#[doc = "Complex add with rotate"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_z<const IMM_ROTATION: i32>( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Conditionally extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] - fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4f32")] + fn _svclasta_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t; } - _svld1_s8(pg, base) + unsafe { _svclasta_f32(pg.into(), fallback, data) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] - fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2f64")] + fn _svclasta_f64(pg: svbool2_t, fallback: svfloat64_t, data:
svfloat64_t) -> svfloat64_t; } - _svld1_s16(simd_cast(pg), base) + unsafe { _svclasta_f64(pg.into(), fallback, data) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] - fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv16i8")] + fn _svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t; } - _svld1_s32(simd_cast(pg), base) + unsafe { _svclasta_s8(pg, fallback, data) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] - fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv8i16")] + fn _svclasta_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t; } - _svld1_s64(simd_cast(pg), base) + unsafe { _svclasta_s16(pg.into(), fallback, data) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { - svld1_s8(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4i32")] + fn _svclasta_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t; + } + unsafe { _svclasta_s32(pg.into(), fallback, data) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { - svld1_s16(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2i64")] + fn _svclasta_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t; + } + unsafe { _svclasta_s64(pg.into(), fallback, data) } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { - svld1_s32(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t { + unsafe { svclasta_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Unextended load"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc 
= " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { - svld1_s64(pg, base.as_signed()).as_unsigned() +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t { + unsafe { svclasta_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] - fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svmul_f32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t { + unsafe { svclasta_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t { + unsafe { svclasta_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svmul_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4f32" + )] + fn _svclasta_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32; + } + unsafe { _svclasta_n_f32(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract 
element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv2f64" + )] + fn _svclasta_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64; + } + unsafe { _svclasta_n_f64(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv16i8" + )] + fn _svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8; + } + unsafe { _svclasta_n_s8(pg, fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svmul_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv8i16" + )] + fn _svclasta_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16; + } + unsafe { _svclasta_n_s16(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { unsafe extern "C" { - 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] - fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4i32" + )] + fn _svclasta_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32; } - unsafe { _svmul_f64_m(simd_cast(pg), op1, op2) } + unsafe { _svclasta_n_s32(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv2i64" + )] + fn _svclasta_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64; + } + unsafe { _svclasta_n_s64(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svmul_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 { + unsafe { svclasta_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 { + unsafe { svclasta_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t 
{ - svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 { + unsafe { svclasta_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract element after last"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fmul))] -pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svmul_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 { + unsafe { svclasta_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] - fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4f32")] + fn _svclastb_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t; } - unsafe { _svmul_s8_m(pg, op1, op2) } -} -#[doc = "Multiply"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] -#[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_m(pg, op1, svdup_n_s8(op2)) + unsafe { _svclastb_f32(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svmul_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2f64")] + fn _svclastb_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t; + } + unsafe { _svclastb_f64(pg.into(), fallback, 
data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv16i8")] + fn _svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t; + } + unsafe { _svclastb_s8(pg, fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv8i16")] + fn _svclastb_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t; + } + unsafe { _svclastb_s16(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svmul_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4i32")] + fn _svclastb_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t; + } + unsafe { _svclastb_s32(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t { unsafe 
extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] - fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2i64")] + fn _svclastb_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t; } - unsafe { _svmul_s16_m(simd_cast(pg), op1, op2) } + unsafe { _svclastb_s64(pg.into(), fallback, data) } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t { + unsafe { svclastb_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svmul_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t { + unsafe { svclastb_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svmul_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t { + unsafe { svclastb_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Conditionally extract last element"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> 
svuint64_t {
+    unsafe { svclastb_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
 }
-#[doc = "Multiply"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
-    svmul_s16_z(pg, op1, svdup_n_s16(op2))
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv4f32"
+        )]
+        fn _svclastb_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
+    }
+    unsafe { _svclastb_n_f32(pg.into(), fallback, data) }
 }
-#[doc = "Multiply"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 {
     unsafe extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")]
-        fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv2f64"
+        )]
+        fn _svclastb_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
     }
-    unsafe { _svmul_s32_m(simd_cast(pg), op1, op2) }
+    unsafe { _svclastb_n_f64(pg.into(), fallback, data) }
 }
-#[doc = "Multiply"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
-    svmul_s32_m(pg, op1, svdup_n_s32(op2))
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv16i8"
+        )]
+        fn _svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
+    }
+    unsafe { _svclastb_n_s8(pg, fallback, data) }
 }
-#[doc = "Multiply"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
-    svmul_s32_m(pg, op1, op2)
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv8i16"
+        )]
+        fn _svclastb_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
+    }
+    unsafe { _svclastb_n_s16(pg.into(), fallback, data) }
 }
-#[doc = "Multiply"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
-    svmul_s32_x(pg, op1, svdup_n_s32(op2))
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv4i32"
+        )]
+        fn _svclastb_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
+    }
+    unsafe { _svclastb_n_s32(pg.into(), fallback, data) }
 }
-#[doc = "Multiply"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
-    svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv2i64"
+        )]
+        fn _svclastb_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
+    }
+    unsafe { _svclastb_n_s64(pg.into(), fallback, data) }
 }
-#[doc = "Multiply"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
-    svmul_s32_z(pg, op1, svdup_n_s32(op2))
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
+    unsafe { svclastb_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
 }
-#[doc = "Divide"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(sdiv))]
-pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
-    unsafe extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")]
-        fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
-    }
-    unsafe { _svdiv_s32_m(simd_cast(pg), op1, op2) }
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
+    unsafe { svclastb_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
 }
-#[doc = "Divide"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(sdiv))]
-pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
-    svdiv_s32_m(pg, op1, svdup_n_s32(op2))
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
+    unsafe { svclastb_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
 }
-#[doc = "Divide"]
+#[doc = "Conditionally extract last element"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(sdiv))]
-pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
-    svdiv_s32_m(pg, op1, op2)
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
+    unsafe { svclastb_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
 }
-#[doc = "Divide"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(sdiv))]
-pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
-    svdiv_s32_x(pg, op1, svdup_n_s32(op2))
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")]
+        fn _svcls_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svcls_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
 }
-#[doc = "Divide"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(sdiv))]
-pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
-    svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    unsafe { svcls_s8_m(op.as_unsigned(), pg, op) }
 }
-#[doc = "Divide"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(sdiv))]
-pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
-    svdiv_s32_z(pg, op1, svdup_n_s32(op2))
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    svcls_s8_m(svdup_n_u8(0), pg, op)
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
     unsafe extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")]
-        fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")]
+        fn _svcls_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
     }
-    unsafe { _svmul_s64_m(simd_cast(pg), op1, op2) }
+    unsafe { _svcls_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svmul_s64_m(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    unsafe { svcls_s16_m(op.as_unsigned(), pg, op) }
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
-    svmul_s64_m(pg, op1, op2)
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    svcls_s16_m(svdup_n_u16(0), pg, op)
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svmul_s64_x(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")]
+        fn _svcls_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svcls_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
-    svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    unsafe { svcls_s32_m(op.as_unsigned(), pg, op) }
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
-    svmul_s64_z(pg, op1, svdup_n_s64(op2))
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    svcls_s32_m(svdup_n_u32(0), pg, op)
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(mul))]
-pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
-    unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")]
+        fn _svcls_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svcls_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
 }
-#[doc = "Multiply"]
+#[doc = "Count leading sign bits"]
#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcls_s64_m(op.as_unsigned(), pg, op) } } -#[doc = "Multiply"] +#[doc = "Count leading sign bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svmul_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcls_s64_m(svdup_n_u64(0), pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svmul_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] + fn _svclz_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svclz_s8_m(inactive.as_signed(), pg, op).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svclz_s8_m(op.as_unsigned(), pg, op) } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - 
svmul_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svclz_s8_m(svdup_n_u8(0), pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] + fn _svclz_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svclz_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svclz_s16_m(op.as_unsigned(), pg, op) } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svmul_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svclz_s16_m(svdup_n_u16(0), pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] + fn _svclz_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svclz_s32_m(inactive.as_signed(), pg.into(), 
op).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svclz_s32_m(op.as_unsigned(), pg, op) } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svmul_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svclz_s32_m(svdup_n_u32(0), pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] + fn _svclz_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svclz_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svclz_s64_m(op.as_unsigned(), pg, op) } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"] #[inline] #[target_feature(enable = 
"sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svmul_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svclz_s64_m(svdup_n_u64(0), pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svclz_s8_m(inactive, pg, op.as_signed()) } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(op, pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svmul_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(svdup_n_u8(0), pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svclz_s16_m(inactive, pg, op.as_signed()) } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(op, pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svmul_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(svdup_n_u16(0), pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svclz_s32_m(inactive, pg, op.as_signed()) } } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(op, pg, op) } -#[doc = "Multiply"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(mul))] -pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svmul_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(svdup_n_u32(0), pg, op) } -#[doc = "Reciprocal estimate"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpe))] -pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32" - )] - fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t; - } - unsafe { _svrecpe_f32(op) } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svclz_s64_m(inactive, pg, op.as_signed()) } } -#[doc = "Reciprocal estimate"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpe))] -pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64" - )] - fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t; - } - unsafe { _svrecpe_f64(op) } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(op, pg, op) } -#[doc = "Reciprocal step"] +#[doc = "Count leading zero bits"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecps))] -pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frecps.x.nxv4f32" - )] - fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svrecps_f32(op1, op2) } +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(svdup_n_u64(0), pg, op) } -#[doc = "Reciprocal step"] +#[doc = "Complex multiply-add with rotate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecps))] -pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frecps.x.nxv2f64" - )] - fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) 
-> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")] + fn _svcmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; } - unsafe { _svrecps_f64(op1, op2) } + unsafe { _svcmla_f32_m(pg.into(), op1, op2, op3, IMM_ROTATION) } } -#[doc = "Reciprocal exponent"] +#[doc = "Complex multiply-add with rotate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpx))] -pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32" - )] - fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; - } - unsafe { _svrecpx_f32_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, op1, op2, op3) } -#[doc = "Reciprocal exponent"] +#[doc = "Complex multiply-add with rotate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpx))] -pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - svrecpx_f32_m(op, pg, op) +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) } -#[doc = "Reciprocal exponent"] +#[doc = "Complex multiply-add with rotate"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpx))] -pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - svrecpx_f32_m(svdup_n_f32(0.0), pg, op) +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")] + fn _svcmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; + } + unsafe { _svcmla_f64_m(pg.into(), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32" + )] + fn _svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")] + fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpeq_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpeq_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")] + fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpeq_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f64(pg: svbool_t, op1: 
svfloat64_t, op2: f64) -> svbool_t { + svcmpeq_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] + fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpeq_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] + fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpeq_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] + fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpeq_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] + fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpeq_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8"
+        )]
+        fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmpeq_wide_s8(pg, op1, op2) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
+    svcmpeq_wide_s8(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16"
+        )]
+        fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmpeq_wide_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
+    svcmpeq_wide_s16(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32"
+        )]
+        fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmpeq_wide_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
+    svcmpeq_wide_s32(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")]
+        fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpge_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")] + fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpge_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] + fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpge_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] + fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpge_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] + fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, 
op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpge_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")] + fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpge_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv16i8")] + fn _svcmpge_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpge_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv8i16")] + fn _svcmpge_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpge_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv4i32")] + fn _svcmpge_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpge_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv2i64")] + fn _svcmpge_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_u64(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpge_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv16i8" + )] + fn _svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpge_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv8i16" + )] + fn _svcmpge_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare 
greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpge_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv4i32" + )] + fn _svcmpge_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpge_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv16i8" + )] + fn _svcmpge_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpge_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv8i16" + )] + fn _svcmpge_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpge_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv4i32" + )] + fn _svcmpge_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpge_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] + fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpgt_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] + fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpgt_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] + fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] + fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] + fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] + fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.cmphi.nxv16i8")] + fn _svcmpgt_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv8i16")] + fn _svcmpgt_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv4i32")] + fn _svcmpgt_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv2i64")] + fn _svcmpgt_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_u64(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv16i8" + )] + fn _svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpgt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv8i16" + )] + fn _svcmpgt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpgt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv4i32" + )] + fn _svcmpgt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpgt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv16i8" + )] + fn _svcmpgt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpgt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv8i16" + )] + fn _svcmpgt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpgt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv4i32" + )] + fn _svcmpgt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpgt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpge_f32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmple_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpge_f64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmple_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpge_s8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmple_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpge_s16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmple_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpge_s32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmple_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpge_s64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmple_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpge_u8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmple_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpge_u16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmple_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpge_u32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmple_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpge_u64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmple_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv16i8" + )] + fn _svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmple_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv8i16" + )] + fn _svcmple_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmple_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv4i32" + )] + fn _svcmple_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmple_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv16i8" + )] + fn _svcmple_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmple_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u16(pg: svbool_t, 
op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv8i16" + )] + fn _svcmple_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmple_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv4i32" + )] + fn _svcmple_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmple_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpgt_f32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmplt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpgt_f64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmplt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> 
svbool_t { + svcmpgt_s8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmplt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn 
svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv16i8" + )] + fn _svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmplt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv8i16" + )] + fn _svcmplt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmplt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv4i32" + )] + fn _svcmplt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmplt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv16i8" + )] + fn _svcmplt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmplt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv8i16" + )] + fn _svcmplt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmplt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(cmplo))] +pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv4i32" + )] + fn _svcmplt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "Compare less than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmplt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")] + fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpne_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpne_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] + fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpne_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpne_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] + fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpne_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, 
op2: i8) -> svbool_t { + svcmpne_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] + fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpne_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpne_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] + fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpne_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpne_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] + fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpne_s64(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpne_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub 
fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpne_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpne_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpne_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpne_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv16i8" + )] + fn _svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpne_wide_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpne_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv8i16" + )] + fn _svcmpne_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpne_wide_s16(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpne_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv4i32" + )] + fn _svcmpne_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpne_wide_s32(pg.into(), op1, op2).into() } +} +#[doc = "Compare not equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpne_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv4f32")] + fn _svcmpuo_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpuo_f32(pg.into(), op1, op2).into() } +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpuo_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv2f64")] + fn _svcmpuo_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpuo_f64(pg.into(), op1, op2).into() } +} +#[doc = "Compare unordered with"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpuo_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] + fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnot_s8_m(inactive, pg, op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] + fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnot_s16_m(inactive, pg.into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] + fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnot_s32_m(inactive, pg.into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] + fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnot_s64_m(inactive, pg.into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnot_s16_m(inactive.as_signed(), pg, 
op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4f32")] + fn 
_svcnt_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcnt_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcnt_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcnt_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2f64")] + fn _svcnt_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcnt_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcnt_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcnt_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv16i8")] + fn _svcnt_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnt_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svcnt_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] 
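+// Editor's note (illustrative, not part of the generated bindings): the suffix encodes
+// how `pg` predicates the operation: `_m` merges inactive lanes from `inactive`, `_z`
+// zeroes them (hence the `svdup_n_*(0)` argument), and `_x` leaves them unspecified,
+// which is why the `_x` wrappers simply reuse `op` as the merge source.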
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv8i16")] + fn _svcnt_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnt_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svcnt_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4i32")] + fn _svcnt_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnt_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svcnt_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2i64")] + fn _svcnt_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnt_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(op.as_unsigned(), pg, 
op) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcnt_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + 
svcnt_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svcnt_s64_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svcnt_u64_m(op, pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svcnt_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Count the number of 8-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdvl))]
+pub fn svcntb() -> u64 {
+    svcntb_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 16-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnth))]
+pub fn svcnth() -> u64 {
+    svcnth_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 32-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntw))]
+pub fn svcntw() -> u64 {
+    svcntw_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 64-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntd))]
+pub fn svcntd() -> u64 {
+    svcntd_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 8-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdvl, PATTERN = { svpattern::SV_ALL }))]
+#[cfg_attr(test, assert_instr(cntb, PATTERN = { svpattern::SV_MUL4 }))]
+pub fn svcntb_pat<const PATTERN: svpattern>() -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")]
+        fn _svcntb_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcntb_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count the number of 16-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnth, PATTERN = { svpattern::SV_ALL }))]
+pub fn svcnth_pat<const PATTERN: svpattern>() -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")]
+        fn _svcnth_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcnth_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count the number of 32-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntw, PATTERN = { svpattern::SV_ALL }))]
+pub fn svcntw_pat<const PATTERN: svpattern>() -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntw")]
+        fn _svcntw_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcntw_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count the number of 64-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntd, PATTERN = { svpattern::SV_ALL }))]
+pub fn svcntd_pat<const PATTERN: svpattern>() -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")]
+        fn _svcntd_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcntd_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")]
+        fn _svcntp_b8(pg: svbool_t, op: svbool_t) -> i64;
+    }
+    unsafe { _svcntp_b8(pg, op).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")]
+        fn _svcntp_b16(pg: svbool8_t, op: svbool8_t) -> i64;
+    }
+    unsafe { _svcntp_b16(pg.into(), op.into()).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")]
+        fn _svcntp_b32(pg: svbool4_t, op: svbool4_t) -> i64;
+    }
+    unsafe { _svcntp_b32(pg.into(), op.into()).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")]
+        fn _svcntp_b64(pg: svbool2_t, op: svbool2_t) -> i64;
+    }
+    unsafe { _svcntp_b64(pg.into(), op.into()).as_unsigned() }
+}
+#[doc = "Shuffle active elements of vector to the right and fill with zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test,
assert_instr(compact))] +pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4f32" + )] + fn _svcompact_f32(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svcompact_f32(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2f64" + )] + fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svcompact_f64(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4i32" + )] + fn _svcompact_s32(pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcompact_s32(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2i64" + )] + fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcompact_s64(pg.into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcompact_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcompact_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv8f32.nxv4f32" + )] + fn _svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t; + } + unsafe { _svcreate2_f32(x0, x1) } +} +#[doc = "Create a tuple of two 
vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv4f64.nxv2f64" + )] + fn _svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t; + } + unsafe { _svcreate2_f64(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv32i8.nxv16i8" + )] + fn _svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t; + } + unsafe { _svcreate2_s8(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv16i16.nxv8i16" + )] + fn _svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t; + } + unsafe { _svcreate2_s16(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv8i32.nxv4i32" + )] + fn _svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t; + } + unsafe { _svcreate2_s32(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create2.nxv4i64.nxv2i64" + )] + fn _svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t; + } + unsafe { _svcreate2_s64(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t { + unsafe { svcreate2_s8(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t { + unsafe { svcreate2_s16(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"] +#[inline] 
+#[target_feature(enable = "sve")] +pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t { + unsafe { svcreate2_s32(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t { + unsafe { svcreate2_s64(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv12f32.nxv4f32" + )] + fn _svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t; + } + unsafe { _svcreate3_f32(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv6f64.nxv2f64" + )] + fn _svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t; + } + unsafe { _svcreate3_f64(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv48i8.nxv16i8" + )] + fn _svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t; + } + unsafe { _svcreate3_s8(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv24i16.nxv8i16" + )] + fn _svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t; + } + unsafe { _svcreate3_s16(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv12i32.nxv4i32" + )] + fn _svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t; + } + unsafe { _svcreate3_s32(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create3.nxv6i64.nxv2i64" + )] + fn _svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t; + } + unsafe { _svcreate3_s64(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t { + unsafe { svcreate3_s8(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t { + unsafe { svcreate3_s16(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t { + unsafe { svcreate3_s32(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t { + unsafe { svcreate3_s64(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_f32( + x0: svfloat32_t, + x1: svfloat32_t, + x2: svfloat32_t, + x3: svfloat32_t, +) -> svfloat32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv16f32.nxv4f32" + )] + fn _svcreate4_f32( + x0: svfloat32_t, + x1: svfloat32_t, + x2: svfloat32_t, + x3: svfloat32_t, + ) -> svfloat32x4_t; + } + unsafe { _svcreate4_f32(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_f64( + x0: svfloat64_t, + x1: svfloat64_t, + x2: svfloat64_t, + x3: svfloat64_t, +) -> svfloat64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv8f64.nxv2f64" + )] + fn _svcreate4_f64( + x0: svfloat64_t, + x1: svfloat64_t, + x2: svfloat64_t, + x3: svfloat64_t, + ) -> svfloat64x4_t; + } + unsafe { _svcreate4_f64(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"] 
+#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8" + )] + fn _svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t; + } + unsafe { _svcreate4_s8(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv32i16.nxv8i16" + )] + fn _svcreate4_s16( + x0: svint16_t, + x1: svint16_t, + x2: svint16_t, + x3: svint16_t, + ) -> svint16x4_t; + } + unsafe { _svcreate4_s16(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> svint32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv16i32.nxv4i32" + )] + fn _svcreate4_s32( + x0: svint32_t, + x1: svint32_t, + x2: svint32_t, + x3: svint32_t, + ) -> svint32x4_t; + } + unsafe { _svcreate4_s32(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) -> svint64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.create4.nxv8i64.nxv2i64" + )] + fn _svcreate4_s64( + x0: svint64_t, + x1: svint64_t, + x2: svint64_t, + x3: svint64_t, + ) -> svint64x4_t; + } + unsafe { _svcreate4_s64(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t { + unsafe { + svcreate4_s8( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_u16( + x0: svuint16_t, + x1: svuint16_t, + x2: svuint16_t, + x3: svuint16_t, +) -> svuint16x4_t { + unsafe { + svcreate4_s16( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_u32( + x0: svuint32_t, + x1: svuint32_t, + x2: svuint32_t, + x3: svuint32_t, +) -> svuint32x4_t { + unsafe { + svcreate4_s32( + x0.as_signed(), + 
x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Create a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svcreate4_u64( + x0: svuint64_t, + x1: svuint64_t, + x2: svuint64_t, + x3: svuint64_t, +) -> svuint64x4_t { + unsafe { + svcreate4_s64( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f32f64")] + fn _svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_f64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvt_f32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvt_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f64f32")] + fn _svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_f32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvt_f64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + svcvt_f64_f32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn 
svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32" + )] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u64_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32" + )] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64" + )] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, pg.into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f32")] + fn _svcvt_s32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_s32_f32_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svcvt_s32_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svcvt_s32_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_m(inactive: svint32_t, pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f64")] + fn _svcvt_s32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_s32_f64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe { svcvt_s32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t { + svcvt_s32_f64_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f32")] + fn _svcvt_s64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_s64_f32_m(inactive, pg.into(), op) } +} +#[doc = 
"Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe { svcvt_s64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t { + svcvt_s64_f32_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f64")] + fn _svcvt_s64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_s64_f64_m(inactive, pg.into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svcvt_s64_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svcvt_s64_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f32")] + fn _svcvt_u32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_u32_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcvt_u32_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcvt_u32_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f64")] + fn _svcvt_u32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_u32_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe { svcvt_u32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + svcvt_u32_f64_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f32")] + fn _svcvt_u64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_u64_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe { svcvt_u64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + svcvt_u64_f32_m(svdup_n_u64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f64")] + fn _svcvt_u64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_u64_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcvt_u64_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcvt_u64_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv4f32")] + fn _svdiv_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdiv_f32_m(pg.into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv2f64")] + fn _svdiv_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdiv_f64_m(pg.into(), op1, op2) } +} +#[doc = 
"Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] + fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_s32_m(pg.into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv2i64")] + fn _svdiv_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_s64_m(pg.into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv4i32")] + fn _svdiv_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { 
_svdiv_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv2i64")] + fn _svdiv_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = 
"Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Divide"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv4f32")] + fn _svdivr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdivr_f32_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdivr_f32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdivr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv2f64")] + fn _svdivr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdivr_f64_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv4i32")] + fn _svdivr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv2i64")] + fn _svdivr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv4i32")] + fn _svdivr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv2i64")] + fn _svdivr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svdivr_u64_m(pg, op1, op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdivr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svdivr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdivr_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
+pub fn svdot_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint8_t,
+    op3: svint8_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sdot.lane.nxv4i32"
+        )]
+        fn _svdot_lane_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svdot_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
+pub fn svdot_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sdot.lane.nxv2i64"
+        )]
+        fn _svdot_lane_s64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svdot_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
+pub fn svdot_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint8_t,
+    op3: svuint8_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.udot.lane.nxv4i32"
+        )]
+        fn _svdot_lane_u32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svdot_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
+    }
+}
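+// Naming convention used by these intrinsics (per the ACLE SVE scheme): `_m`
+// merges inactive lanes from the `inactive`/first vector operand, `_z` zeroes
+// inactive lanes, `_x` leaves inactive lanes unspecified, and `_n_` variants
+// broadcast a scalar operand via the matching `svdup_n_*` call. A minimal
+// usage sketch (illustrative only; assumes an SVE-enabled aarch64 target and
+// values `pg: svbool_t`, `a: svint8_t`, `b: svint8_t`, `num: svfloat32_t`,
+// `den: svfloat32_t` already in scope):
+//
+//     // sum each group of four i8 products into a 32-bit accumulator lane
+//     let acc = svdot_s32(svdup_n_s32(0), a, b);
+//     // predicated divide; lanes where `pg` is false are zeroed
+//     let q = svdiv_f32_z(pg, num, den);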
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
+pub fn svdot_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.udot.lane.nxv2i64"
+        )]
+        fn _svdot_lane_u64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svdot_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
+    }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv4i32")]
+        fn _svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svdot_s32(op1, op2, op3) }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t {
+    svdot_s32(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv2i64")]
+        fn _svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t;
+    }
+    unsafe { _svdot_s64(op1, op2, op3) }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t {
+    svdot_s64(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot))]
+pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv4i32")]
+        fn _svdot_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svdot_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot))]
+pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t {
+    svdot_u32(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv2i64")] + fn _svdot_u64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t { + svdot_u64(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t { + svtbl_f32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + svtbl_f64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t { + svtbl_s8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t { + svtbl_s16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t { + svtbl_s32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t { + svtbl_s64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t { + svtbl_u8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t { + svtbl_u16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t { + svtbl_u32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + svtbl_u64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b8(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i1")] + fn _svdup_n_b8(op: bool) -> svbool_t; + } + unsafe { _svdup_n_b8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b16(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i1")] + fn _svdup_n_b16(op: bool) -> svbool8_t; + } + unsafe { _svdup_n_b16(op).into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b32(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i1")] + fn _svdup_n_b32(op: bool) -> svbool4_t; + } + unsafe { _svdup_n_b32(op).into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b64(op: bool) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i1")] + fn _svdup_n_b64(op: bool) -> svbool2_t; + } + unsafe { _svdup_n_b64(op).into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn 
_svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a 
scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4f32")] + fn _svdup_n_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_m(inactive: svfloat64_t, pg: svbool_t, op: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2f64")] + fn _svdup_n_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv16i8")] + fn _svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8_m(inactive, pg, op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv8i16")] + fn _svdup_n_s16_m(inactive: svint16_t, pg: svbool8_t, op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4i32")] + fn _svdup_n_s32_m(inactive: svint32_t, pg: svbool4_t, op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> 
svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2i64")] + fn _svdup_n_s64_m(inactive: svint64_t, pg: svbool2_t, op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64_m(inactive, pg.into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t { + unsafe { svdup_n_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_z(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t { + unsafe { svdup_n_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> 
svuint32_t { + unsafe { svdup_n_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t { + unsafe { svdup_n_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4f32" + )] + fn _svdupq_lane_f32(data: svfloat32_t, index: i64) -> svfloat32_t; + } + unsafe { _svdupq_lane_f32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2f64" + )] + fn _svdupq_lane_f64(data: svfloat64_t, index: i64) -> svfloat64_t; + } + unsafe { _svdupq_lane_f64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.dupq.lane.nxv16i8" + )] + fn _svdupq_lane_s8(data: svint8_t, index: i64) -> svint8_t; + } + unsafe { _svdupq_lane_s8(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv8i16" + )] + fn _svdupq_lane_s16(data: svint16_t, index: i64) -> svint16_t; + } + unsafe { _svdupq_lane_s16(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4i32" + )] + fn _svdupq_lane_s32(data: svint32_t, index: i64) -> svint32_t; + } + unsafe { _svdupq_lane_s32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2i64" + )] + fn _svdupq_lane_s64(data: svint64_t, index: i64) -> svint64_t; + } + unsafe { _svdupq_lane_s64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t { + unsafe { svdupq_lane_s8(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t { + unsafe { svdupq_lane_s16(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t { + unsafe { svdupq_lane_s32(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + unsafe { svdupq_lane_s64(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b16( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, +) -> svbool_t { + let op1 = svdupq_n_s16( + x0 as i16, x1 as i16, x2 as i16, x3 as i16, x4 as i16, x5 as i16, x6 as i16, x7 as i16, + ); + svcmpne_wide_s16(svptrue_b16(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t { + let op1 = svdupq_n_s32(x0 as i32, x1 as i32, x2 as i32, x3 as i32); + svcmpne_wide_s32(svptrue_b32(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t { + let op1 = svdupq_n_s64(x0 as i64, x1 as i64); + svcmpne_s64(svptrue_b64(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_b8( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, + x8: bool, + x9: bool, + x10: bool, + x11: bool, + x12: bool, + x13: bool, + x14: bool, + x15: bool, +) -> svbool_t { + let op1 = svdupq_n_s8( + x0 as i8, x1 as i8, x2 as i8, x3 as i8, x4 as i8, x5 as i8, x6 as i8, x7 as i8, x8 as i8, + x9 as i8, x10 as i8, x11 as i8, x12 as i8, x13 as i8, x14 as i8, x15 as i8, + ); + svcmpne_wide_s8(svptrue_b8(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32" + )] + fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t; + } + unsafe { + let op = _svdupq_n_f32( + simd_reinterpret(()), + crate::mem::transmute([x0, x1, x2, x3]), + 0, + ); + svdupq_lane_f32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32" + )] + fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t; + } + unsafe { + let op = _svdupq_n_s32( + simd_reinterpret(()), + crate::mem::transmute([x0, x1, x2, x3]), + 0, + ); + svdupq_lane_s32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u32(x0: u32, x1: 
u32, x2: u32, x3: u32) -> svuint32_t { + unsafe { + svdupq_n_s32( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64" + )] + fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t; + } + unsafe { + let op = _svdupq_n_f64(simd_reinterpret(()), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_f64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64" + )] + fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t; + } + unsafe { + let op = _svdupq_n_s64(simd_reinterpret(()), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_s64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t { + unsafe { svdupq_n_s64(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s16( + x0: i16, + x1: i16, + x2: i16, + x3: i16, + x4: i16, + x5: i16, + x6: i16, + x7: i16, +) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16" + )] + fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t; + } + unsafe { + let op = _svdupq_n_s16( + simd_reinterpret(()), + crate::mem::transmute([x0, x1, x2, x3, x4, x5, x6, x7]), + 0, + ); + svdupq_lane_s16(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u16( + x0: u16, + x1: u16, + x2: u16, + x3: u16, + x4: u16, + x5: u16, + x6: u16, + x7: u16, +) -> svuint16_t { + unsafe { + svdupq_n_s16( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_s8( + x0: i8, + x1: i8, + x2: i8, + x3: i8, + x4: i8, + x5: i8, + x6: i8, + x7: i8, + x8: i8, + x9: i8, + x10: i8, + x11: i8, + x12: i8, + x13: i8, + x14: i8, + x15: i8, +) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.experimental.vector.insert.nxv16i8.v16i8" + )] + fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t; + } + unsafe { + let op = _svdupq_n_s8( + simd_reinterpret(()), + crate::mem::transmute([ + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, + ]), + 0, + ); + svdupq_lane_s8(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svdupq_n_u8( + x0: u8, + x1: u8, + x2: u8, + x3: u8, + x4: u8, + x5: u8, + x6: u8, + x7: u8, + x8: u8, + x9: u8, + x10: u8, + x11: u8, + x12: u8, + x13: u8, + x14: u8, + x15: u8, +) -> svuint8_t { + unsafe { + svdupq_n_s8( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + x8.as_signed(), + x9.as_signed(), + x10.as_signed(), + x11.as_signed(), + x12.as_signed(), + x13.as_signed(), + x14.as_signed(), + x15.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.z.nvx16i1")] + fn _sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _sveor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv16i8")] + fn _sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveor_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_z(pg: 
svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv8i16")] + fn _sveor_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveor_s16_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv4i32")] + fn _sveor_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveor_s32_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv2i64")] + fn _sveor_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveor_s64_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveor_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveor_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveor_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, svsel_u32(pg, op1, 
svdup_n_u32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveor_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv16i8")] + fn _sveorv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _sveorv_s8(pg, op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv8i16")] + fn _sveorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _sveorv_s16(pg.into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv4i32")] + fn _sveorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _sveorv_s32(pg.into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv2i64")] + fn _sveorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _sveorv_s64(pg.into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { sveorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { sveorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { sveorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { sveorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Floating-point exponential accelerator"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fexpa))] +pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fexpa.x.nxv4f32 " + )] + fn _svexpa_f32(op: svint32_t) -> svfloat32_t; + } + unsafe { _svexpa_f32(op.as_signed()) } +} +#[doc = "Floating-point exponential accelerator"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(fexpa))]
+pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fexpa.x.nxv2f64"
+        )]
+        fn _svexpa_f64(op: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svexpa_f64(op.as_signed()) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4f32")]
+        fn _svext_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
+    }
+    unsafe { _svext_f32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2f64")]
+        fn _svext_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
+    }
+    unsafe { _svext_f64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 0, 255);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv16i8")]
+        fn _svext_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svext_s8(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 0, 127);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv8i16")]
+        fn _svext_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svext_s16(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4i32")]
+        fn _svext_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svext_s32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2i64")]
+        fn _svext_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svext_s64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 0, 255);
+    unsafe { svext_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 0, 127);
+    unsafe { svext_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe { svext_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe { svext_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv8i16")]
+        fn _svextb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svextb_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svextb_s16_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svextb_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv4i32")] + fn _svextb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_s32_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv4i32")] + fn _svexth_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_s32_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv2i64")] + fn _svextb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_s64_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv2i64")] + fn _svexth_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_s64_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svexth_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svexth_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtw.nxv2i64")] + fn _svextw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_s64_m(inactive, pg.into(), op) } +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv8i16")] + fn _svextb_u16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svextb_u16_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = 
"Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv4i32")] + fn _svextb_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv4i32")] + fn _svexth_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv2i64")] + fn _svextb_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv2i64")] + fn _svexth_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtw.nxv2i64")] + fn _svextw_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_x(pg: svbool_t, op: 
svuint64_t) -> svuint64_t { + svextw_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv8f32" + )] + fn _svget2_f32(tuple: svfloat32x2_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svget2_f32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64" + )] + fn _svget2_f64(tuple: svfloat64x2_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svget2_f64(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8" + )] + fn _svget2_s8(tuple: svint8x2_t, imm_index: i32) -> svint8_t; + } + unsafe { _svget2_s8(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv16i16" + )] + fn _svget2_s16(tuple: svint16x2_t, imm_index: i32) -> svint16_t; + } + unsafe { _svget2_s16(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv8i32" + )] + fn _svget2_s32(tuple: svint32x2_t, imm_index: i32) -> svint32_t; + } + unsafe { _svget2_s32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn 
svget2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv4i64" + )] + fn _svget2_s64(tuple: svint64x2_t, imm_index: i32) -> svint64_t; + } + unsafe { _svget2_s64(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svget2_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svget2_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svget2_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svget2_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv12f32" + )] + fn _svget3_f32(tuple: svfloat32x3_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svget3_f32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv6f64" + )] + fn _svget3_f64(tuple: svfloat64x3_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svget3_f64(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv48i8" + )] + fn _svget3_s8(tuple: svint8x3_t, 
imm_index: i32) -> svint8_t; + } + unsafe { _svget3_s8(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv24i16" + )] + fn _svget3_s16(tuple: svint16x3_t, imm_index: i32) -> svint16_t; + } + unsafe { _svget3_s16(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv12i32" + )] + fn _svget3_s32(tuple: svint32x3_t, imm_index: i32) -> svint32_t; + } + unsafe { _svget3_s32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv6i64" + )] + fn _svget3_s64(tuple: svint64x3_t, imm_index: i32) -> svint64_t; + } + unsafe { _svget3_s64(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe { svget3_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe { svget3_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe { svget3_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe { svget3_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv16f32" + )] + fn _svget4_f32(tuple: svfloat32x4_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svget4_f32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv8f64" + )] + fn _svget4_f64(tuple: svfloat64x4_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svget4_f64(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv64i8" + )] + fn _svget4_s8(tuple: svint8x4_t, imm_index: i32) -> svint8_t; + } + unsafe { _svget4_s8(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv32i16" + )] + fn _svget4_s16(tuple: svint16x4_t, imm_index: i32) -> svint16_t; + } + unsafe { _svget4_s16(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv16i32" + )] + fn _svget4_s32(tuple: svint32x4_t, imm_index: i32) -> svint32_t; + } + unsafe { _svget4_s32(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv8i64" + )] + fn _svget4_s64(tuple: svint64x4_t, imm_index: i32) -> svint64_t; + } + unsafe { _svget4_s64(tuple, IMM_INDEX) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"] 
+#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svget4_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svget4_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svget4_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svget4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svget4_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s8(base: i8, step: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv16i8")] + fn _svindex_s8(base: i8, step: i8) -> svint8_t; + } + unsafe { _svindex_s8(base, step) } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s16(base: i16, step: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv8i16")] + fn _svindex_s16(base: i16, step: i16) -> svint16_t; + } + unsafe { _svindex_s16(base, step) } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s32(base: i32, step: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv4i32")] + fn _svindex_s32(base: i32, step: i32) -> svint32_t; + } + unsafe { _svindex_s32(base, step) } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s64(base: i64, step: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv2i64")] + fn _svindex_s64(base: i64, step: i64) -> svint64_t; + } + unsafe { _svindex_s64(base, step) } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u8(base: u8, step: u8) -> svuint8_t { + unsafe { svindex_s8(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u16(base: u16, step: u16) -> svuint16_t { + unsafe { svindex_s16(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u32(base: u32, step: u32) -> svuint32_t { + unsafe { svindex_s32(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u64(base: u64, step: u64) -> svuint64_t { + unsafe { svindex_s64(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4f32")] + fn _svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t; + } + unsafe { _svinsr_n_f32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2f64")] + fn _svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t; + } + unsafe { _svinsr_n_f64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv16i8")] + fn _svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t; + } + unsafe { _svinsr_n_s8(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv8i16")] + fn _svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t; + } + 
unsafe { _svinsr_n_s16(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4i32")] + fn _svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t; + } + unsafe { _svinsr_n_s32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2i64")] + fn _svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t; + } + unsafe { _svinsr_n_s64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + unsafe { svinsr_n_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + unsafe { svinsr_n_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + unsafe { svinsr_n_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + unsafe { svinsr_n_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")] + fn _svlasta_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlasta_f32(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe 
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")] + fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlasta_f64(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")] + fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlasta_s8(pg, op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")] + fn _svlasta_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlasta_s16(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")] + fn _svlasta_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlasta_s32(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")] + fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlasta_s64(pg.into(), op) } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlasta_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlasta_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlasta_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlasta_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")] + fn _svlastb_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlastb_f32(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")] + fn _svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlastb_f64(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")] + fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlastb_s8(pg, op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")] + fn _svlastb_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlastb_s16(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")] + fn _svlastb_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlastb_s32(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")] + fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlastb_s64(pg.into(), op) } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlastb_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlastb_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlastb_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlastb_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1_f64(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1_s8(pg, base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1_s16(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1_s32(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(pg.into(), base) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32" + )] + fn _svld1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32index_f32(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn 
svld1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32" + )] + fn _svld1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32index_s32(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svld1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64" + )] + fn _svld1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64index_f64(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64" + )] + fn _svld1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64index_s64(pg.into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32" + )] + fn _svld1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32index_f32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32" + )] + fn _svld1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32index_s32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svld1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svld1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32" + )] + fn _svld1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32offset_f32(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32" + )] + fn _svld1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32offset_s32(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svld1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2f64" + )] + fn _svld1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64offset_f64(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i64" + )] + fn _svld1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64offset_s64(pg.into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32" + )] + fn _svld1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32offset_f32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32" + )] + fn _svld1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32offset_s32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svld1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svld1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_f64( + 
pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svld1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svld1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svld1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, 
so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svld1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svld1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svld1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svld1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svld1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svld1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svld1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svld1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svld1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svld1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4f32")] + fn _svld1ro_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1ro_f32(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2f64")] + fn _svld1ro_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1ro_f64(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv16i8")] + fn _svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1ro_s8(pg, base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
+#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv8i16")] + fn _svld1ro_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1ro_s16(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4i32")] + fn _svld1ro_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1ro_s32(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2i64")] + fn _svld1ro_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1ro_s64(pg.into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1ro_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1ro_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of 
data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1ro_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1ro_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4f32")] + fn _svld1rq_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1rq_f32(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2f64")] + fn _svld1rq_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1rq_f64(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv16i8")] + fn _svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1rq_s8(pg, base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv8i16")] + fn _svld1rq_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1rq_s16(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4i32")] + fn _svld1rq_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1rq_s32(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2i64")] + fn _svld1rq_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1rq_s64(pg.into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub 
unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1rq_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1rq_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1rq_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1rq_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svld1sb_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svld1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast(_svld1sb_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svld1sh_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svld1sw_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svld1sb_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svld1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast(_svld1sb_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
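The unsigned-offset and unsigned-result wrappers above are zero-cost: offsets are reinterpreted with `as_signed()` before the call and the result with `as_unsigned()` after it, so each still lowers to a single `ld1sb`/`ld1sh`/`ld1sw`. A minimal caller-side sketch of the same thing, assuming `svptrue_b64` from the wider patch:

    #[target_feature(enable = "sve")]
    unsafe fn gather_bytes_unsigned(table: *const i8, byte_offsets: svuint64_t) -> svuint64_t {
        // Identical codegen to calling svld1sb_gather_s64offset_s64 and
        // reinterpreting the lanes afterwards.
        svld1sb_gather_u64offset_u64(svptrue_b64(), table, byte_offsets)
    }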
address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_s64( + pg: svbool_t, + 
bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast(_svld1sb_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast(_svld1sh_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast(_svld1sw_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each 
lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and 
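The `_u32base`/`_u64base` forms above flip the addressing: each lane of `bases` carries its own integer address and the scalar `offset` is added to every lane, which is why the safety notes flag the lack of provenance. A minimal sketch, assuming `svdup_n_u64` (the lane-broadcast intrinsic from the wider patch) and an address obtained from a previously exposed, valid pointer:

    #[target_feature(enable = "sve")]
    unsafe fn gather_from_exposed_addr(addr: usize, pg: svbool_t) -> svint64_t {
        // Broadcast one address into every lane, then load and sign-extend
        // the i8 at that address for each active lane.
        let bases = svdup_n_u64(addr as u64);
        svld1sb_gather_u64base_offset_s64(pg, bases, 0)
    }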
accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each 
active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast(_svld1sb_s16(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast(_svld1sb_s32(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast(_svld1sh_s32(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast(_svld1sb_s64(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast(_svld1sh_s64(pg.into(), base)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast(_svld1sw_s64(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svld1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u32(pg: svbool_t, 
base: *const i8) -> svuint32_t { + svld1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svld1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svld1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svld1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svld1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
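The non-gather forms above are plain contiguous loads that widen as they read, e.g. `svld1sb_s16` loads one `i8` per 16-bit lane and sign-extends it. A hedged usage sketch, assuming `svptrue_b16` and `svcnth` from the wider patch (a real caller would normally build a partial predicate rather than assert on the length):

    #[target_feature(enable = "sve")]
    unsafe fn widen_prefix(bytes: &[i8]) -> svint16_t {
        // All lanes active, so the slice must cover one full vector's worth
        // of i8 elements (svcnth() of them).
        debug_assert!(bytes.len() >= svcnth() as usize);
        svld1sb_s16(svptrue_b16(), bytes.as_ptr())
    }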
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svld1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svld1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svld1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svld1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svld1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svld1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svld1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svld1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svld1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svld1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svld1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
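The `_vnum` forms step through memory in whole-vector units: the pointer is advanced by `vnum` times the per-vector element count (`svcnth`, `svcntw`, or `svcntd`), which cannot be constant-folded because the vector length is only known at run time. A sketch of loading the second such block, assuming `svptrue_b32` from the wider patch:

    #[target_feature(enable = "sve")]
    unsafe fn load_second_block(base: *const i8) -> svuint32_t {
        // Equivalent to svld1sb_u32(svptrue_b32(), base.offset(svcntw() as isize)),
        // i.e. one vector's worth (svcntw() i8 elements) past `base`.
        svld1sb_vnum_u32(svptrue_b32(), base, 1)
    }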
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svld1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_s32index_s32(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svld1sh_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svld1sw_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svld1sh_gather_u32index_s32( + pg.into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's 
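Unlike the `_offset` gathers, the `_index` gathers above take element indices and let the addressing mode scale them by the element size. A caller-side sketch, assuming `svptrue_b64` from the wider patch:

    #[target_feature(enable = "sve")]
    unsafe fn gather_elements(table: *const i16, element_indices: svint64_t) -> svint64_t {
        // Lane i loads table[element_indices[i]] (an i16) and sign-extends it;
        // the hardware multiplies each index by 2 bytes during address generation.
        svld1sh_gather_s64index_s64(svptrue_b64(), table, element_indices)
    }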
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svld1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svld1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svld1ub_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_s64)"] 
+#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast::( + _svld1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svld1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svld1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svld1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svld1ub_gather_u32offset_u32(pg.into(), base.as_signed(), 
offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for 
each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast::( + _svld1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + 
offset: i64, + ) -> nxv2i8; + } + simd_cast::( + _svld1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast::( + _svld1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast::( + _svld1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1ub_gather_u32base_offset_s32(pg, 
bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_s64(pg: svbool_t, bases: 
svuint64_t) -> svint64_t { + svld1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast::(_svld1ub_s16(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast::(_svld1ub_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast::(_svld1uh_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast::(_svld1ub_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast::(_svld1uh_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast::(_svld1uw_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svld1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svld1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svld1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svld1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svld1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svld1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s16)"] +#[doc = 
""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svld1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svld1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svld1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svld1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svld1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svld1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svld1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svld1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svld1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svld1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svld1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svld1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svld1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_s32index_u32(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by 
`pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svld1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svld1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * 
This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svld1uh_gather_u32index_u32(pg.into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv8f32.nxv4i1" + )] + fn _svld2_f32(pg: svbool4_t, base: *const f32) -> svfloat32x2_t; + } + _svld2_f32(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv4f64.nxv2i1" + )] + fn _svld2_f64(pg: svbool2_t, base: *const f64) -> svfloat64x2_t; + } + _svld2_f64(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv32i8.nxv16i1" + )] + fn _svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t; + } + _svld2_s8(pg, base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv16i16.nxv8i1" + )] + fn _svld2_s16(pg: svbool8_t, base: *const i16) -> svint16x2_t; + } + _svld2_s16(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv8i32.nxv4i1" + )] + fn _svld2_s32(pg: svbool4_t, base: *const i32) -> svint32x2_t; + } + _svld2_s32(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for 
the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.nxv4i64.nxv2i1" + )] + fn _svld2_s64(pg: svbool2_t, base: *const i64) -> svint64x2_t; + } + _svld2_s64(pg.into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t { + svld2_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_u16(pg: svbool_t, base: *const u16) -> svuint16x2_t { + svld2_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t { + svld2_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t { + svld2_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f32])"] 
+#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x2_t { + svld2_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x2_t { + svld2_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x2_t { + svld2_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x2_t { + svld2_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x2_t { + svld2_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x2_t { + svld2_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x2_t { + svld2_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x2_t { + svld2_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x2_t { + svld2_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> 
svuint64x2_t { + svld2_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv12f32.nxv4i1" + )] + fn _svld3_f32(pg: svbool4_t, base: *const f32) -> svfloat32x3_t; + } + _svld3_f32(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv6f64.nxv2i1" + )] + fn _svld3_f64(pg: svbool2_t, base: *const f64) -> svfloat64x3_t; + } + _svld3_f64(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv48i8.nxv16i1" + )] + fn _svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t; + } + _svld3_s8(pg, base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv24i16.nxv8i1" + )] + fn _svld3_s16(pg: svbool8_t, base: *const i16) -> svint16x3_t; + } + 
_svld3_s16(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv12i32.nxv4i1" + )] + fn _svld3_s32(pg: svbool4_t, base: *const i32) -> svint32x3_t; + } + _svld3_s32(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.nxv6i64.nxv2i1" + )] + fn _svld3_s64(pg: svbool2_t, base: *const i64) -> svint64x3_t; + } + _svld3_s64(pg.into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t { + svld3_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t { + svld3_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t { + svld3_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t { + svld3_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x3_t { + svld3_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x3_t { + svld3_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x3_t { + svld3_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x3_t { + svld3_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x3_t { + svld3_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x3_t { + svld3_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x3_t { + svld3_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x3_t { + svld3_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x3_t { + svld3_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x3_t { + svld3_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv16f32.nxv4i1" + )] + fn _svld4_f32(pg: svbool4_t, base: *const f32) -> svfloat32x4_t; + } + _svld4_f32(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv8f64.nxv2i1" + )] + fn _svld4_f64(pg: svbool2_t, base: *const f64) -> svfloat64x4_t; + } + _svld4_f64(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv64i8.nxv16i1" + )] + fn _svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t; + } + _svld4_s8(pg, base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv32i16.nxv8i1" + )] + fn _svld4_s16(pg: svbool8_t, base: *const i16) -> svint16x4_t; + } + _svld4_s16(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv16i32.nxv4i1" + )] + fn _svld4_s32(pg: svbool4_t, base: *const i32) -> svint32x4_t; + } + _svld4_s32(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.nxv8i64.nxv2i1" + )] + fn _svld4_s64(pg: svbool2_t, base: *const i64) -> svint64x4_t; + } + _svld4_s64(pg.into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t { + svld4_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t { + svld4_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t { + svld4_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t { + svld4_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x4_t { + svld4_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x4_t { + svld4_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x4_t { + svld4_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x4_t { + svld4_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x4_t { + svld4_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x4_t { + svld4_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x4_t { + svld4_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x4_t { + svld4_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x4_t { + svld4_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x4_t { + svld4_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4f32")] + fn _svldff1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldff1_f32(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2f64")] + fn _svldff1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldff1_f64(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv16i8")] + fn _svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldff1_s8(pg, base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i16")] + fn _svldff1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldff1_s16(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i32")] + fn _svldff1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldff1_s32(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i64")] + fn _svldff1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldff1_s64(pg.into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldff1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldff1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldff1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldff1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32" + )] + fn _svldff1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32index_f32(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32" + )] + fn _svldff1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32index_s32(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svldff1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2f64" + )] + fn _svldff1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64index_f64(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i64" + )] + fn _svldff1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64index_s64(pg.into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32" + )] + fn _svldff1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32index_f32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32" + )] + fn _svldff1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32index_s32(pg.into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldff1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32" + )] + fn _svldff1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32offset_f32(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32" + )] + fn _svldff1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32offset_s32(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svldff1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2f64" + )] + fn _svldff1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64offset_f64(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i64" + )] + fn _svldff1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64offset_s64(pg.into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32" + )] + fn _svldff1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32offset_f32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32" + )] + fn _svldff1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32offset_s32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldff1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldff1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldff1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldff1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldff1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldff1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldff1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldff1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldff1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldff1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldff1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldff1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldff1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldff1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldff1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svldff1sb_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_s32offset_s32(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svldff1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast(_svldff1sb_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldff1sh_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldff1sw_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svldff1sb_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast(_svldff1sb_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast(_svldff1sb_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast(_svldff1sh_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast(_svldff1sw_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1sw_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1sb_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1sh_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1sw_gather_u64base_offset_u64(pg, bases, 0)
+}
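// Illustrative sketch, not part of the generated bindings: the `_u64base_` gathers
// above take the complete per-lane addresses in a vector register, which is why their
// docs flag the missing provenance. A minimal sketch of gathering every other `i32`
// from a slice, assuming `svindex_u64`, `svptrue_b64` and `svsetffr` are exposed by the
// same SVE module under their ACLE names (assumptions here, FFR read-back elided):
#[target_feature(enable = "sve")]
unsafe fn gather_every_other_i32(buf: &[i32]) -> svint64_t {
    // Exposing the address as a plain integer is exactly the caveat described above.
    let start = buf.as_ptr() as u64;
    // Assumed helper: lane i gets `start + 8 * i`, i.e. the address of buf[2 * i].
    let bases = svindex_u64(start, 8);
    svsetffr(); // assumed companion intrinsic: prime the first-fault register
    // Each active lane loads an i32 from its own address and sign-extends it to i64.
    svldff1sw_gather_u64base_s64(svptrue_b64(), bases)
}
// end of sketch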
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")]
+        fn _svldff1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+    }
+    simd_cast(_svldff1sb_s16(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")]
+        fn _svldff1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+    }
+    simd_cast(_svldff1sb_s32(pg.into(), base))
+}
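// Illustrative sketch, not part of the generated bindings: the usual first-faulting
// call sequence for the contiguous loads defined here is to prime the FFR, issue the
// load, then read the FFR back to learn which leading lanes were actually loaded.
// `svptrue_b32`, `svsetffr`, `svrdffr` and `svaddv_s32` are assumed to come from the
// same SVE module under their ACLE names; treat them as assumptions of this sketch.
#[target_feature(enable = "sve")]
unsafe fn sum_leading_bytes(buf: &[i8]) -> i64 {
    let pg = svptrue_b32(); // assumed: all-true predicate over 32-bit lanes
    svsetffr();             // assumed: set every FFR bit before the first-faulting load
    // Sign-extending, first-faulting load of up to svcntw() bytes starting at `buf`.
    let v = svldff1sb_s32(pg, buf.as_ptr());
    // Only lanes still set in the FFR were loaded; other lanes of `v` are
    // CONSTRAINED UNPREDICTABLE, as the safety notes above explain.
    let loaded = svrdffr(); // assumed: read the first-fault register as a predicate
    svaddv_s32(loaded, v)   // assumed: horizontal add over the successfully loaded lanes
}
// end of sketch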
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")]
+        fn _svldff1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+    }
+    simd_cast(_svldff1sh_s32(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")]
+        fn _svldff1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+    }
+    simd_cast(_svldff1sb_s64(pg.into(), base))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast(_svldff1sh_s64(pg.into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast(_svldff1sw_s64(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldff1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldff1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldff1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldff1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldff1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldff1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t {
+    svldff1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t {
+    svldff1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t {
+    svldff1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
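// Illustrative sketch, not part of the generated bindings: `vnum` advances the base
// pointer by whole vectors. For svldff1sb_vnum_s16 that is svcnth() source bytes (one
// 16-bit-lane vector's worth) per unit, exactly as in the wrapper bodies above.
// `svptrue_b16` and `svsetffr` are assumed companions from the same SVE module.
#[target_feature(enable = "sve")]
unsafe fn load_third_vector_of_bytes(buf: &[i8]) -> svint16_t {
    svsetffr(); // assumed: prime the first-fault register
    // Equivalent to svldff1sb_s16(pg, buf.as_ptr().offset(svcnth() as isize * 2)).
    svldff1sb_vnum_s16(svptrue_b16(), buf.as_ptr(), 2)
}
// end of sketch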
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t {
+    svldff1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t {
+    svldff1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldff1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldff1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldff1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldff1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldff1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldff1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldff1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_s32index_s32(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldff1sh_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldff1sw_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldff1sh_gather_u32index_s32( + pg.into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svldff1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint32_t,
+) -> svint32_t {
+    svldff1uh_gather_s32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint32_t,
+) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8"
+        )]
+        fn _svldff1ub_gather_s32offset_u32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast::<_, svuint32_t>(
+        _svldff1ub_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint32_t,
+) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16"
+        )]
+        fn _svldff1uh_gather_s32offset_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<_, svuint32_t>(
+        _svldff1uh_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svint64_t {
+    svldff1ub_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8"
+        )]
+        fn _svldff1ub_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i8,
+            offsets: svint64_t,
+        ) -> nxv2i8;
+    }
+    simd_cast::<_, svuint64_t>(
+        _svldff1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint64_t,
+) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16"
+        )]
+        fn _svldff1uh_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast::<_, svuint64_t>(
+        _svldff1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint64_t,
+) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32"
+        )]
+        fn _svldff1uw_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i32,
+            offsets: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast::<_, svuint64_t>(
+        _svldff1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svldff1ub_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svldff1uh_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8"
+        )]
+        fn _svldff1ub_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast::<_, svuint32_t>(
+        _svldff1ub_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16"
+        )]
+        fn _svldff1uh_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<_, svuint32_t>(
+        _svldff1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svldff1ub_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i8;
+    }
+    simd_cast::<_, svuint32_t>(
+        _svldff1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svldff1uh_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i16;
+    }
+    simd_cast::<_, svuint32_t>(
+        _svldff1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svldff1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svldff1ub_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i8;
+    }
+    simd_cast::<_, svuint64_t>(
+        _svldff1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svldff1uh_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i16;
+    }
+    simd_cast::<_, svuint64_t>(
+        _svldff1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svldff1uw_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i32;
+    }
+    simd_cast::<_, svuint64_t>(
+        _svldff1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
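// The base-vector gathers above consume raw 64-bit addresses, so provenance
// has to be re-established the way a `usize as *const T` cast would. A sketch
// of building `bases` from one exposed pointer; `svindex_u64` is assumed to be
// the usual ACLE "base + i * step" constructor and is not defined in this hunk,
// and `expose_addr` is the exposing counterpart of the `from_exposed_addr`
// mentioned in the notes above:
#[target_feature(enable = "sve")]
unsafe fn gather_from_table_sketch(pg: svbool_t, table: *const u8) -> svuint64_t {
    let addr = table.expose_addr() as u64;   // expose the allocation's address
    let bases = svindex_u64(addr, 1);        // assumed helper: addr, addr + 1, addr + 2, ...
    svldff1ub_gather_u64base_u64(pg, bases)  // defined above; reads one byte per active lane
}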
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast::(_svldff1ub_s16(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast::(_svldff1ub_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
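// A sketch of the FFR interplay that the safety notes above keep referring to,
// assuming the usual ACLE helpers `svsetffr`, `svptrue_b16` and `svrdffr`
// exist alongside these bindings (they are not part of this hunk):
#[target_feature(enable = "sve")]
unsafe fn first_fault_peek_sketch(p: *const u8) -> svint16_t {
    svsetffr();                    // assumed helper: set every FFR bit before the load
    let pg = svptrue_b16();        // assumed helper: all-true predicate for 16-bit lanes
    let v = svldff1ub_s16(pg, p);  // first-faulting load defined above
    let _loaded = svrdffr();       // assumed helper: FFR now marks the lanes that really loaded
    v                              // lanes outside `_loaded` hold CONSTRAINED UNPREDICTABLE data
}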
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast::(_svldff1uh_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast::(_svldff1ub_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast::(_svldff1uh_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast::(_svldff1uw_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
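// "Zero-extend" in the descriptions above means a loaded byte of 0xFF becomes
// 255 in the wider lane, never -1; sign extension is the separate
// `svldff1sb_*`/`svldff1sh_*` family, which is not shown in this hunk. A sketch:
#[target_feature(enable = "sve")]
unsafe fn zero_extend_sketch(pg: svbool_t) -> svint64_t {
    // 32 bytes covers the architectural maximum of 32 doubleword lanes
    // (a 2048-bit vector), so every lane's address stays inside the array.
    let bytes = [0xFFu8; 32];
    svldff1ub_s64(pg, bytes.as_ptr()) // each active lane that loads holds 255i64
}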
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldff1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldff1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldff1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldff1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldff1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldff1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldff1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldff1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldff1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldff1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldff1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldff1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldff1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldff1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldff1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldff1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldff1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldff1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
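// The `_vnum_` forms above step in whole vectors: `vnum` is multiplied by the
// current vector length before being added to `base`, exactly as their bodies
// compute with `svcnth`/`svcntw`/`svcntd`. A sketch of the equivalence
// (illustrative only; the second load also perturbs the FFR):
#[target_feature(enable = "sve")]
unsafe fn vnum_equivalence_sketch(pg: svbool_t, base: *const u8) -> svint16_t {
    let via_vnum = svldff1ub_vnum_s16(pg, base, 2);
    let via_offset = svldff1ub_s16(pg, base.offset(svcnth() as isize * 2));
    let _ = via_offset; // both calls address the same memory
    via_vnum
}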
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svldff1uh_gather_s32index_u32(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svldff1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svldff1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svldff1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svldff1uh_gather_u32index_u32(pg.into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
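// For the 32-bit index forms above, the indices are widened to 64 bits before
// the address calculation: the `s32index` variants sign-extend (SXTW, per the
// LLVM intrinsic names they bind to) while the `u32index` variants zero-extend
// (UXTW). That only matters once an index has its top bit set. A sketch:
#[target_feature(enable = "sve")]
unsafe fn index_widening_sketch(
    pg: svbool_t,
    base: *const u16,
    signed_idx: svint32_t,
    unsigned_idx: svuint32_t,
) -> svuint32_t {
    let a = svldff1uh_gather_s32index_u32(pg, base, signed_idx);   // indices sign-extended
    let b = svldff1uh_gather_u32index_u32(pg, base, unsigned_idx); // indices zero-extended
    let _ = a;
    b
}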
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4f32")] + fn _svldnf1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnf1_f32(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2f64")] + fn _svldnf1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnf1_f64(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv16i8")] + fn _svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnf1_s8(pg, base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i16")] + fn _svldnf1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnf1_s16(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i32")] + fn _svldnf1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnf1_s32(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i64")] + fn _svldnf1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnf1_s64(pg.into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnf1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnf1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnf1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnf1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnf1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnf1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnf1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnf1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnf1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnf1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnf1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnf1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnf1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnf1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast(_svldnf1sb_s16(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast(_svldnf1sb_s32(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast(_svldnf1sh_s32(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast(_svldnf1sb_s64(pg.into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast(_svldnf1sh_s64(pg.into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast(_svldnf1sw_s64(pg.into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldnf1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldnf1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldnf1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldnf1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldnf1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldnf1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldnf1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldnf1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldnf1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldnf1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldnf1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldnf1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldnf1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldnf1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldnf1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldnf1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldnf1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldnf1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + simd_cast::(_svldnf1ub_s16(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + simd_cast::(_svldnf1ub_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + simd_cast::(_svldnf1uh_s32(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + simd_cast::(_svldnf1ub_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + simd_cast::(_svldnf1uh_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + simd_cast::(_svldnf1uw_s64(pg.into(), base.as_signed()).as_unsigned()) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldnf1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldnf1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldnf1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldnf1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldnf1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldnf1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldnf1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldnf1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldnf1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldnf1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldnf1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldnf1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldnf1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldnf1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldnf1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldnf1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldnf1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldnf1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4f32")] + fn _svldnt1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnt1_f32(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2f64")] + fn _svldnt1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnt1_f64(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv16i8")] + fn _svldnt1_s8(pg: svbool_t, 
base: *const i8) -> svint8_t; + } + _svldnt1_s8(pg, base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv8i16")] + fn _svldnt1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnt1_s16(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4i32")] + fn _svldnt1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnt1_s32(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2i64")] + fn _svldnt1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnt1_s64(pg.into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u8])"] 
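A note on the `vnum` forms above, since the scaling is easy to misread: `vnum` indexes whole vectors, not bytes or elements, so the address actually read moves by one run-time vector length per unit of `vnum`. The sketch below is purely illustrative (the helper name is invented) and uses only intrinsics defined in this file to show the equivalence the implementations rely on:

    // Load the second vector's worth of zero-extended bytes starting at `base`.
    // The two calls compute the same address: `vnum = 1` advances the pointer by
    // exactly `svcntw()` elements, a value only known at run time.
    #[target_feature(enable = "sve")]
    unsafe fn load_second_vector_of_bytes(pg: svbool_t, base: *const u8) -> svuint32_t {
        let by_vnum = svldnf1ub_vnum_u32(pg, base, 1);
        let by_offset = svldnf1ub_u32(pg, base.offset(svcntw() as isize));
        // For active lanes that did not fault the two results are identical; lanes
        // cleared in the FFR are "CONSTRAINED UNPREDICTABLE" in both.
        let _ = by_offset;
        by_vnum
    }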
+#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnt1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnt1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnt1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> 
svuint64_t { + svldnt1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_f32(_op: svfloat32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_f64(_op: svfloat64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_s8(_op: svint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_s16(_op: svint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_s32(_op: svint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_s64(_op: svint64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_u8(_op: svuint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_u16(_op: svuint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_u32(_op: svuint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_u64(_op: svuint64_t) -> u64 { + svcntd() +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv16i8")] + fn _svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsl_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, svsel_s8(pg, op1, 
svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv8i16")] + fn _svlsl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsl_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv4i32")] + fn _svlsl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsl_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"] 
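For the `svlen_*` functions a little further up: they ignore the value of their argument entirely and report how many lanes a full vector of that element type holds, which is the same number the corresponding `svcnt*` call returns. A trivial illustrative helper (the name is invented):

    // Equivalent to calling svcntw() directly: only the element type of `v`
    // matters, never its contents.
    #[target_feature(enable = "sve")]
    fn lanes_of(v: svint32_t) -> u64 {
        svlen_s32(v)
    }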
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv2i64")] + fn _svlsl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsl_s64_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"] +#[inline] 
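The `_m`, `_x` and `_z` suffixes on the shifts above differ only in what the lanes where `pg` is false contain: `_m` keeps the corresponding lane of `op1`, `_z` forces it to zero (visible above as the `svsel` against a zero splat), and `_x` leaves it unspecified (in this patch it simply reuses the merging form). An illustrative sketch calling the 32-bit forms already defined above (the helper name is invented):

    // Active lanes are `op1 << shift` in all three results; only the inactive
    // lanes differ, as described above.
    #[target_feature(enable = "sve")]
    fn svlsl_forms_demo(pg: svbool_t, op1: svint32_t, shift: u32) -> svint32_t {
        let merged = svlsl_n_s32_m(pg, op1, shift);      // inactive lanes keep op1
        let unspecified = svlsl_n_s32_x(pg, op1, shift); // inactive lanes unspecified
        let _ = merged;
        let _ = unspecified;
        svlsl_n_s32_z(pg, op1, shift)                    // inactive lanes are 0
    }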
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svlsl_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svlsl_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_m(pg, op1, 
svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svlsl_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svlsl_s64_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv16i8" + )] + fn _svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsl_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = 
"Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv8i16" + )] + fn _svlsl_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsl_wide_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_z(pg: svbool_t, op1: 
svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv4i32" + )] + fn _svlsl_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsl_wide_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe { svlsl_wide_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe { svlsl_wide_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe { svlsl_wide_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv16i8")] + fn _svlsr_u8_m(pg: svbool_t, op1: svint8_t, 
op2: svint8_t) -> svint8_t; + } + unsafe { _svlsr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv8i16")] + fn _svlsr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_x(pg: 
svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv4i32")] + fn _svlsr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv2i64")] + fn _svlsr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv16i8" + )] + fn _svlsr_wide_u8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsr_wide_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_x(pg: 
svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv8i16" + )] + fn _svlsr_wide_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsr_wide_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv4i32" + )] + fn _svlsr_wide_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsr_wide_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv4f32")] + fn _svmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmad_f32_m(pg.into(), op1, op2, 
op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv2f64")] + fn _svmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmad_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + 
svmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv16i8")] + fn _svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmad_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv8i16")] + fn _svmad_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmad_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv4i32")] + fn _svmad_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { 
_svmad_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv2i64")] + fn _svmad_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmad_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmad_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> 
svuint8_t { + svmad_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmad_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmad_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmad_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, 
op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv4f32")] + fn _svmax_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmax_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv2f64")] + fn _svmax_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmax_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv16i8")] + fn _svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_s8_m(pg, op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmax_s8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmax_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmax_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv8i16")]
+        fn _svmax_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmax_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmax_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmax_s16_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmax_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmax_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmax_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv4i32")]
+        fn _svmax_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmax_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv2i64")] + fn _svmax_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_s64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv16i8")] + fn _svmax_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv8i16")] + fn _svmax_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmax_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv4i32")] + fn _svmax_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv2i64")] + fn _svmax_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Maximum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv4f32")] + fn 
_svmaxnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnm_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv2f64")] + fn _svmaxnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnm_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"] +#[inline] 
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmaxnm_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmaxnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmaxnm_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum number reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnmv))]
+pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmaxnmv.nxv4f32"
+        )]
+        fn _svmaxnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svmaxnmv_f32(pg.into(), op) }
+}
+#[doc = "Maximum number reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnmv))]
+pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmaxnmv.nxv2f64"
+        )]
+        fn _svmaxnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svmaxnmv_f64(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxv))]
+pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv4f32")]
+        fn _svmaxv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svmaxv_f32(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxv))]
+pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv2f64")]
+        fn _svmaxv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svmaxv_f64(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smaxv))]
+pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv16i8")]
+
fn _svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_s8(pg, op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv8i16")] + fn _svmaxv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_s16(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv4i32")] + fn _svmaxv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_s32(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv2i64")] + fn _svmaxv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_s64(pg.into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv16i8")] + fn _svmaxv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv8i16")] + fn _svmaxv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_u16(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv4i32")] + fn _svmaxv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_u32(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn 
svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv2i64")] + fn _svmaxv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_u64(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv4f32")] + fn _svmin_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmin_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv2f64")] + fn _svmin_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmin_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> 
svfloat64_t { + svmin_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv16i8")] + fn _svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_s8_m(pg, op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv8i16")] + fn _svmin_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_s16_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv4i32")] + fn _svmin_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_s32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_m(pg, op1, 
svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv2i64")] + fn _svmin_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_s64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv16i8")] + fn _svmin_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv8i16")] + fn _svmin_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: 
u16) -> svuint16_t { + svmin_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv4i32")] + fn _svmin_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), 
op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv2i64")] + fn _svmin_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Minimum"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv4f32")] + fn _svminnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnm_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv2f64")] + fn _svminnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnm_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv4f32" + )] + fn _svminnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminnmv_f32(pg.into(), op) } +} +#[doc = "Minimum number reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv2f64" + )] + fn _svminnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminnmv_f64(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv4f32")] + fn _svminv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminv_f32(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv2f64")] + fn _svminv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminv_f64(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv16i8")] + fn _svminv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_s8(pg, op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv8i16")] + fn _svminv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_s16(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv4i32")] + fn _svminv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_s32(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv2i64")] + fn _svminv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_s64(pg.into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv16i8")] + fn _svminv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv8i16")] + fn _svminv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_u16(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv4i32")] + fn _svminv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_u32(pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv2i64")] + fn _svminv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_u64(pg.into(), 
op.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv4f32")] + fn _svmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmla_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv2f64")] + fn _svmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmla_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv16i8")] + fn _svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmla_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv8i16")] + fn _svmla_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmla_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_z(pg, 
op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv4i32")] + fn _svmla_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmla_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv2i64")] + fn _svmla_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmla_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, 
op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmla_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub 
fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmla_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmla_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmla_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmla_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_x(pg, 
op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmla_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] +pub fn svmla_lane_f32<const IMM_INDEX: i32>( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmla.lane.nxv4f32" + )] + fn _svmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + IMM_INDEX: i32, + ) -> svfloat32_t; + } + unsafe { _svmla_lane_f32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] +pub fn svmla_lane_f64<const IMM_INDEX: i32>( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmla.lane.nxv2f64" + )] + fn _svmla_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + IMM_INDEX: i32, + ) -> svfloat64_t; + } + unsafe { _svmla_lane_f64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv4f32")] + fn _svmls_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmls_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"] +#[inline]
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv2f64")] + fn _svmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmls_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv16i8")] + fn _svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmls_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv8i16")] + fn _svmls_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmls_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv4i32")] + fn _svmls_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmls_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv2i64")] + fn _svmls_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmls_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmls_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmls_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc 
= "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmls_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] 
+pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmls_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] +pub fn svmls_lane_f32<const IMM_INDEX: i32>( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmls.lane.nxv4f32" + )] + fn _svmls_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + IMM_INDEX: i32, + ) -> svfloat32_t; + } + unsafe { _svmls_lane_f32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] +pub fn svmls_lane_f64<const IMM_INDEX: i32>( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmls.lane.nxv2f64" + )] + fn _svmls_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + IMM_INDEX: i32, + ) -> svfloat64_t; + } + unsafe { _svmls_lane_f64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"] +#[inline] +#[target_feature(enable = "sve,f32mm")] +#[cfg_attr(test, assert_instr(fmmla))] +pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv4f32")] + fn _svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmmla_f32(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(fmmla))] +pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv2f64")] + fn _svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmmla_f64(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"] +#[inline] +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(smmla))] +pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smmla.nxv4i32")] + fn _svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_s32(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"] +#[inline] +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(ummla))] +pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ummla.nxv4i32")] + fn _svmmla_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Move"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + svand_b_z(pg, op, op) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv4f32")] + fn _svmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmsb_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fmsb))] +pub fn svmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv2f64")] + fn _svmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmsb_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv16i8")] + fn _svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmsb_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = 
"Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv8i16")] + fn _svmsb_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmsb_s16_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv4i32")] + fn _svmsb_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmsb_s32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv2i64")] + fn _svmsb_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmsb_s64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmsb_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmsb_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: 
svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmsb_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmsb_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand 
first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] + fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmul_f32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn 
svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] + fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmul_f64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: 
svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmul_s8_m(pg, op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmul_s16_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: 
svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name 
= "llvm.aarch64.sve.mul.nxv2i64")] + fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmul_s64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_m(pg: 
svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv16i8")] + fn _svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_s8_m(pg, op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv8i16")] + fn _svmulh_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_s16_m(pg.into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_m(pg: svbool_t, op1: 
svint16_t, op2: i16) -> svint16_t { + svmulh_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv4i32")] + fn _svmulh_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_s32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv2i64")] + fn _svmulh_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_s64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.umulh.nxv16i8")] + fn _svmulh_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv8i16")] + fn _svmulh_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv4i32")] + fn _svmulh_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] 
+pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv2i64")] + fn _svmulh_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv4f32")] + fn _svmulx_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmulx_f32_m(pg.into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn 
svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv2f64")] + fn _svmulx_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmulx_f64_m(pg.into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Bitwise NAND"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(nand))] +pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nand.z.nxv16i1")] + fn _svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnand_b_z(pg, op1, op2) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv4f32")] + fn _svneg_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svneg_f32_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv2f64")] + fn _svneg_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svneg_f64_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv16i8")] + fn _svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svneg_s8_m(inactive, pg, op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv8i16")] + fn _svneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svneg_s16_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv4i32")] + fn _svneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svneg_s32_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_x(pg: svbool_t, 
op: svint32_t) -> svint32_t { + svneg_s32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv2i64")] + fn _svneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svneg_s64_m(inactive, pg.into(), op) } +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv4f32")] + fn _svnmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmad_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_x(pg, op1, op2, 
svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv2f64")] + fn _svnmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmad_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fnmad))] +pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv4f32")] + fn _svnmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmla_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv2f64")] + fn _svnmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> 
svfloat64_t; + } + unsafe { _svnmla_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv4f32")] + fn _svnmls_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmls_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fnmls))] +pub fn svnmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv2f64")] + fn _svnmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmls_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv4f32")] + fn _svnmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmsb_f32_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, 
op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv2f64")] + fn _svnmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmsb_f64_m(pg.into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Bitwise NOR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(nor))] +pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nor.z.nxv16i1")] + fn _svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + sveor_b_z(pg, op, pg) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv16i8")] + fn _svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svnot_s8_m(inactive, pg, op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv8i16")] + fn _svnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svnot_s16_m(inactive, pg.into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv4i32")] + fn _svnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svnot_s32_m(inactive, pg.into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(not))] +pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv2i64")] + fn _svnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svnot_s64_m(inactive, pg.into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(op, pg, op) +} +#[doc = 
"Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Bitwise inclusive OR, inverting second argument"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orn))] +pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orn.z.nvx16i1")] + fn _svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorn_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.z.nvx16i1")] + fn _svorr_b_z(pg: 
svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorr_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svorr_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svorr_s16_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t 
{ + svorr_u64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv16i8")] + fn _svorv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svorv_s8(pg, op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv8i16")] + fn _svorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svorv_s16(pg.into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv4i32")] + fn _svorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svorv_s32(pg.into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv2i64")] + fn _svorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svorv_s64(pg.into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u8(pg: svbool_t, 
op: svuint8_t) -> u8 { + unsafe { svorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Set all predicate elements to false"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pfalse))] +pub fn svpfalse_b() -> svbool_t { + svdupq_n_b8( + false, false, false, false, false, false, false, false, false, false, false, false, false, + false, false, false, + ) +} +#[doc = "Set the first active predicate element to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pfirst))] +pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pfirst.nxv16i1")] + fn _svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpfirst_b(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv16i1")] + fn _svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpnext_b8(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv8i1")] + fn _svpnext_b16(pg: svbool8_t, op: svbool8_t) -> svbool8_t; + } + unsafe { _svpnext_b16(pg.into(), op.into()).into() } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b32(pg: 
svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv4i1")] + fn _svpnext_b32(pg: svbool4_t, op: svbool4_t) -> svbool4_t; + } + unsafe { _svpnext_b32(pg.into(), op.into()).into() } +} +#[doc = "Find next active predicate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv2i1")] + fn _svpnext_b64(pg: svbool2_t, op: svbool2_t) -> svbool2_t; + } + unsafe { _svpnext_b64(pg.into(), op.into()).into() } +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb<T, const OP: svprfop>(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv16i1")] + fn _svprfb(pg: svbool_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfb(pg, base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh<T, const OP: svprfop>(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv8i1")] + fn _svprfh(pg: svbool8_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfh(pg.into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw<T, const OP: svprfop>(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv4i1")] + fn _svprfw(pg: svbool4_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfw(pg.into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# 
[cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd(pg: svbool_t, base: *const T) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv2i1")] + fn _svprfd(pg: svbool2_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfd(pg.into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s32offset( + pg: svbool_t, + base: *const T, + offsets: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.sxtw.index.nxv4i32" + )] + fn _svprfb_gather_s32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_s32offset(pg.into(), base as *const crate::ffi::c_void, offsets, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.sxtw.index.nxv4i32" + )] + fn _svprfh_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.sxtw.index.nxv4i32" + )] + fn _svprfw_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation 
for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.sxtw.index.nxv4i32" + )] + fn _svprfd_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s64offset( + pg: svbool_t, + base: *const T, + offsets: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.index.nxv2i64" + )] + fn _svprfb_gather_s64offset( + pg: svbool2_t, + base: *const crate::ffi::c_void, + offsets: svint64_t, + op: svprfop, + ); + } + _svprfb_gather_s64offset(pg.into(), base as *const crate::ffi::c_void, offsets, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.index.nxv2i64" + )] + fn _svprfh_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfh_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.index.nxv2i64" + )] + fn _svprfw_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfw_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.index.nxv2i64" + )] + fn _svprfd_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfd_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u32offset( + pg: svbool_t, + base: *const T, + offsets: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32" + )] + fn _svprfb_gather_u32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_u32offset( + pg.into(), + base as *const crate::ffi::c_void, + offsets.as_signed(), + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.uxtw.index.nxv4i32" + )] + fn _svprfh_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_u32index( + pg.into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.prfw.gather.uxtw.index.nxv4i32" + )] + fn _svprfw_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_u32index( + pg.into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.uxtw.index.nxv4i32" + )] + fn _svprfd_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_u32index( + pg.into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u64offset( + pg: svbool_t, + base: *const T, + offsets: svuint64_t, +) { + svprfb_gather_s64offset::(pg, base, offsets.as_signed()) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfh_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfw_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be 
met for the address calculation for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfd_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfb_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn _svprfh_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfb_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane 
before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base(pg.into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base]_offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base_offset( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base_offset(pg: svbool4_t, bases: svint32_t, offset: i64, op: svprfop); + } + _svprfb_gather_u32base_offset(pg.into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn 
_svprfh_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base]_offset)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base_offset( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base_offset(pg: svbool2_t, bases: svint64_t, offset: i64, op: svprfop); + } + _svprfb_gather_u64base_offset(pg.into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base]_index)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active 
element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfb_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
+    svprfb::<OP, T>(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfh_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
+    svprfh::<OP, T>(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfw_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
+    svprfw::<OP, T>(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_vnum)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`).
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfd::(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Test whether any active element is true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.any.nxv16i1" + )] + fn _svptest_any(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_any(pg, op) } +} +#[doc = "Test whether first active element is true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.first.nxv16i1" + )] + fn _svptest_first(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_first(pg, op) } +} +#[doc = "Test whether last active element is true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.last.nxv16i1" + )] + fn _svptest_last(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_last(pg, op) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b8() -> svbool_t { + svptrue_pat_b8::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b16() -> svbool_t { + svptrue_pat_b16::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b32() -> svbool_t { + svptrue_pat_b32::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b64() -> svbool_t { + svptrue_pat_b64::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] 
+#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + } + unsafe { _svptrue_pat_b16(PATTERN).into() } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { _svptrue_pat_b32(PATTERN).into() } +} +#[doc = "Set predicate elements to true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b64() -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] + fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + } + unsafe { _svptrue_pat_b64(PATTERN).into() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8" + )] + fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16" + )] + fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32" + )] + fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64" + )] + fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8" + )] + fn _svqadd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16" + )] + fn _svqadd_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32" + )] + fn _svqadd_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64" + )] + fn _svqadd_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s32(op: i32) -> i32 { + svqdecb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s32(op: i32) -> i32 { + svqdech_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s32(op: i32) -> i32 { + svqdecw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s32(op: i32) -> i32 { + svqdecd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s64(op: i64) -> i64 { + svqdecb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s64(op: i64) -> i64 { + svqdech_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s64(op: i64) -> i64 { + svqdecw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s64(op: i64) -> i64 { + svqdecd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u32(op: u32) -> u32 { + svqdecb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u32(op: u32) -> u32 { + svqdech_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u32(op: u32) -> u32 { + svqdecw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u32(op: u32) -> u32 { + svqdecd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u64(op: u64) -> u64 { + svqdecb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u64(op: u64) -> u64 { + svqdech_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u64(op: u64) -> u64 { + svqdecw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u64(op: u64) -> u64 { + svqdecd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n32")] + fn _svqdecb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n32")] + fn _svqdech_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n32")] + fn _svqdecw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = 
"Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n32")] + fn _svqdecd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n64")] + fn _svqdecb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n64")] + fn _svqdech_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n64")] + fn _svqdecw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n64")] + fn _svqdecd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n32")] + fn _svqdecb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n32")] + fn _svqdech_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n32")] + fn _svqdecw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n32")] + fn _svqdecd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n64")] + fn _svqdecb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n64")] + fn _svqdech_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n64")] + fn _svqdecw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n64")] + fn _svqdecd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.nxv8i16")] + fn _svqdech_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.nxv4i32")] + fn _svqdecw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.nxv2i64")] + fn _svqdecd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.nxv8i16")] + fn _svqdech_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.nxv4i32")] + fn _svqdecw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.nxv2i64")] + fn _svqdecd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_s16(op: svint16_t) -> svint16_t { + svqdech_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_s32(op: svint32_t) -> svint32_t { + 
svqdecw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_s64(op: svint64_t) -> svint64_t { + svqdecd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_u16(op: svuint16_t) -> svuint16_t { + svqdech_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_u32(op: svuint32_t) -> svuint32_t { + svqdecw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_u64(op: svuint64_t) -> svuint64_t { + svqdecd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_s32_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_s32_b16(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_s32_b32(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_s32_b64(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_s64_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_s64_b16(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_s64_b32(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_s64_b64(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"] +#[inline] +#[target_feature(enable = 
"sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_u32_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_u32_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_u32_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_u64_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_u64_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_u64_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv8i16")] + fn _svqdecp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_s16(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv4i32")] + fn _svqdecp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_s32(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv2i64")] + fn _svqdecp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_s64(op, pg.into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv8i16")] + fn _svqdecp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_u16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv4i32")] + fn _svqdecp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_u32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv2i64")] + fn _svqdecp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_u64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqincb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqinch_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqincw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 { + svqincd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqincb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqinch_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqincw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 { + svqincd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqincb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqinch_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqincw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 { + svqincd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 { + svqincb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 { + svqinch_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 { + svqincw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 { + svqincd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + 
unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n32")] + fn _svqincb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n32")] + fn _svqinch_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n32")] + fn _svqincw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n32")] + fn _svqincd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n64")] + fn _svqincb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n64")] + fn _svqinch_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> 
i64; + } + unsafe { _svqinch_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n64")] + fn _svqincw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n64")] + fn _svqincd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n32")] + fn _svqincb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n32")] + fn _svqinch_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n32")] + fn _svqincw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment 
by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n32")] + fn _svqincd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n64")] + fn _svqincb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n64")] + fn _svqinch_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqinch_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n64")] + fn _svqincw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n64")] + fn _svqincd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_s16<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.nxv8i16")] + fn _svqinch_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.nxv4i32")] + fn _svqincw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.nxv2i64")] + fn _svqincd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_u16<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.nxv8i16")] + fn _svqinch_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.nxv4i32")] + fn _svqincw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword 
elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.nxv2i64")] + fn _svqincd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_s16<const IMM_FACTOR: i32>(op: svint16_t) -> svint16_t { + svqinch_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_s32<const IMM_FACTOR: i32>(op: svint32_t) -> svint32_t { + svqincw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_s64<const IMM_FACTOR: i32>(op: svint64_t) -> svint64_t { + svqincd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_u16<const IMM_FACTOR: i32>(op: svuint16_t) -> svuint16_t { + svqinch_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_u32<const IMM_FACTOR: i32>(op: svuint32_t) -> svuint32_t { + svqincw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_u64<const IMM_FACTOR: i32>(op: svuint64_t) -> svuint64_t { + svqincd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqincp.n32.nxv16i1" + )] + fn _svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_s32_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv8i1" + )] + fn _svqincp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_s32_b16(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv4i1" + )] + fn _svqincp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_s32_b32(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv2i1" + )] + fn _svqincp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_s32_b64(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv16i1" + )] + fn _svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_s64_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv8i1" + )] + fn _svqincp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_s64_b16(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv4i1" + )] + fn _svqincp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_s64_b32(op, pg.into()) } +} +#[doc = "Saturating 
increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv2i1" + )] + fn _svqincp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_s64_b64(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv16i1" + )] + fn _svqincp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv8i1" + )] + fn _svqincp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_u32_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv4i1" + )] + fn _svqincp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_u32_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv2i1" + )] + fn _svqincp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_u32_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv16i1" + )] + fn _svqincp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv8i1" + )] + fn _svqincp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_u64_b16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv4i1" + )] + fn _svqincp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_u64_b32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv2i1" + )] + fn _svqincp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_u64_b64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv8i16")] + fn _svqincp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_s16(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv4i32")] + fn _svqincp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_s32(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv2i64")] + fn _svqincp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_s64(op, pg.into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv8i16")] + fn _svqincp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_u16(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv4i32")] + fn _svqincp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_u32(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv2i64")] + fn _svqincp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_u64(op.as_signed(), pg.into()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8" + )] + fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_s8(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16" + )] + fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32" + )] + fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64" + )] + fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8" + )] + fn _svqsub_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16" + )] + fn _svqsub_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16(op1, svdup_n_u16(op2)) +} +#[doc 
= "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32" + )] + fn _svqsub_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64" + )] + fn _svqsub_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv16i8")] + fn _svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svrbit_s8_m(inactive, pg, op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv8i16")] + fn _svrbit_s16_m(inactive: svint16_t, 
pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrbit_s16_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv4i32")] + fn _svrbit_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrbit_s32_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv2i64")] + fn _svrbit_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrbit_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn 
svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svrbit_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrbit_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrbit_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrbit_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Read FFR, returning predicate of successfully loaded elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr() -> svbool_t { + svrdffr_z(svptrue_b8()) +} +#[doc = "Read FFR, returning predicate of successfully loaded elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr_z(pg: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rdffr.z")] + fn _svrdffr_z(pg: svbool_t) -> svbool_t; + } + unsafe { _svrdffr_z(pg) } +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32" + )] + fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpe_f32(op) } +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64" + )] + fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpe_f64(op) } +} +#[doc = "Reciprocal step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv4f32" + )] + fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecps_f32(op1, op2) } +} +#[doc = "Reciprocal step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv2f64" + )] + fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> 
svfloat64_t; + } + unsafe { _svrecps_f64(op1, op2) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32" + )] + fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpx_f32_m(inactive, pg.into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv2f64" + )] + fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpx_f64_m(inactive, pg.into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"] 
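+// Editorial aside (not produced by the stdarch generator): every `svreinterpret_<dst>_<src>`
+// function in this family is a plain bit-level cast between same-sized SVE vector types,
+// lowered through `simd_reinterpret`; no element values are converted and no predicate is
+// involved. A minimal round-trip sketch, assuming the `svdup_n_f32` helper used elsewhere
+// in this file:
+//
+//     let ones: svfloat32_t = svdup_n_f32(1.0);
+//     let bits: svuint32_t = svreinterpret_u32_f32(ones);
+//     let back: svfloat32_t = svreinterpret_f32_u32(bits);
+//     // `back` carries the exact bit pattern (and value) of `ones`.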
+#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u8(op: svuint8_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_s64(op: svint64_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { + unsafe { simd_reinterpret(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b8(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.rev.nxv16i1")] + fn _svrev_b8(op: svbool_t) -> svbool_t; + } + unsafe { _svrev_b8(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b16(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")] + fn _svrev_b16(op: svbool8_t) -> svbool8_t; + } + unsafe { _svrev_b16(op.into()).into() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b32(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")] + fn _svrev_b32(op: svbool4_t) -> svbool4_t; + } + unsafe { _svrev_b32(op.into()).into() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b64(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")] + fn _svrev_b64(op: svbool2_t) -> svbool2_t; + } + unsafe { _svrev_b64(op.into()).into() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")] + fn _svrev_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrev_f32(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")] + fn _svrev_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrev_f64(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s8(op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")] + fn _svrev_s8(op: svint8_t) -> svint8_t; + } + unsafe { _svrev_s8(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s16(op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")] + fn _svrev_s16(op: svint16_t) -> svint16_t; + } + unsafe { _svrev_s16(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s32(op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")] + fn _svrev_s32(op: svint32_t) -> svint32_t; + } + unsafe { _svrev_s32(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s64(op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")] + fn _svrev_s64(op: svint64_t) -> svint64_t; + } + unsafe { _svrev_s64(op) } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u8(op: svuint8_t) -> svuint8_t { + unsafe { svrev_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u16(op: svuint16_t) -> svuint16_t { + unsafe { svrev_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u32(op: svuint32_t) -> svuint32_t { + unsafe { svrev_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u64(op: svuint64_t) -> svuint64_t { + unsafe { svrev_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv8i16")] + fn _svrevb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrevb_s16_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> 
svint16_t { + svrevb_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv4i32")] + fn _svrevb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevb_s32_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv2i64")] + fn _svrevb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevb_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrevb_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevb_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevb_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv4i32")] + fn _svrevh_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevh_s32_m(inactive, pg.into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(op, pg, op) 
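+    // Editorial note (illustrative, not from the generator): the `_x` ("don't care") wrappers
+    // in this file forward to the `_m` form with `op` doubling as the merge source, so inactive
+    // lanes keep their input value, while the `_z` wrappers merge into a zero vector
+    // (e.g. `svdup_n_s32(0)`), zeroing inactive lanes instead.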
+} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv2i64")] + fn _svrevh_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevh_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevh_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevh_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(revh))] +pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revw.nxv2i64")] + fn _svrevw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevw_s64_m(inactive, pg.into(), op) } +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevw_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv4f32")] + fn _svrinta_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinta_f32_m(inactive, 
pg.into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv2f64")] + fn _svrinta_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinta_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv4f32")] + fn _svrinti_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinti_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = 
"Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv2f64")] + fn _svrinti_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinti_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv4f32")] + fn _svrintm_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintm_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv2f64")] + fn _svrintm_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintm_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn 
svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv4f32")] + fn _svrintn_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintn_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv2f64")] + fn _svrintn_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintn_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.frintp.nxv4f32")] + fn _svrintp_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintp_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv2f64")] + fn _svrintp_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintp_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv4f32")] + fn _svrintx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintx_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_z(pg: svbool_t, op: 
svfloat32_t) -> svfloat32_t { + svrintx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv2f64")] + fn _svrintx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintx_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv4f32")] + fn _svrintz_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintz_f32_m(inactive, pg.into(), op) } +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv2f64")] + fn _svrintz_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintz_f64_m(inactive, pg.into(), op) } +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"] 
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32" + )] + fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrte_f32(op) } +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64" + )] + fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrte_f64(op) } +} +#[doc = "Reciprocal square root step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32" + )] + fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrts_f32(op1, op2) } +} +#[doc = "Reciprocal square root step"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64" + )] + fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrts_f64(op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv4f32")] + fn _svscale_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svscale_f32_m(pg.into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(fscale))] +pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv2f64")] + fn _svscale_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svscale_f64_m(pg.into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"] +#[inline] 
+#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe { simd_select(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe { simd_select::<svbool4_t, svfloat32_t>(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe { simd_select::<svbool2_t, svfloat64_t>(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe { simd_select::<svbool_t, svint8_t>(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe { simd_select::<svbool8_t, svint16_t>(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe { simd_select::<svbool4_t, svint32_t>(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe { simd_select::<svbool2_t, svint64_t>(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u8(pg:
svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { simd_select::<svbool_t, svuint8_t>(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { simd_select::<svbool8_t, svuint16_t>(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { simd_select::<svbool4_t, svuint32_t>(pg.into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { simd_select::<svbool2_t, svuint64_t>(pg.into(), op1, op2) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv8f32.nxv4f32" + )] + fn _svset2_f32(tuple: svfloat32x2_t, imm_index: i32, x: svfloat32_t) -> svfloat32x2_t; + } + unsafe { _svset2_f32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t, x: svfloat64_t) -> svfloat64x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv4f64.nxv2f64" + )] + fn _svset2_f64(tuple: svfloat64x2_t, imm_index: i32, x: svfloat64_t) -> svfloat64x2_t; + } + unsafe { _svset2_f64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t, x: svint8_t) -> svint8x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv32i8.nxv16i8" + )] + fn _svset2_s8(tuple: svint8x2_t, imm_index: i32, x: svint8_t) -> svint8x2_t; + } + unsafe { _svset2_s8(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t, x: svint16_t) -> svint16x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv16i16.nxv8i16" + )] + fn _svset2_s16(tuple: svint16x2_t, imm_index: i32, x: svint16_t) -> svint16x2_t; + } + unsafe { _svset2_s16(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv8i32.nxv4i32" + )] + fn _svset2_s32(tuple: svint32x2_t, imm_index: i32, x: svint32_t) -> svint32x2_t; + } + unsafe { _svset2_s32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv4i64.nxv2i64" + )] + fn _svset2_s64(tuple: svint64x2_t, imm_index: i32, x: svint64_t) -> svint64x2_t; + } + unsafe { _svset2_s64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s8::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s16::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s32::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svset2_s64::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv12f32.nxv4f32" +
)] + fn _svset3_f32(tuple: svfloat32x3_t, imm_index: i32, x: svfloat32_t) -> svfloat32x3_t; + } + unsafe { _svset3_f32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv6f64.nxv2f64" + )] + fn _svset3_f64(tuple: svfloat64x3_t, imm_index: i32, x: svfloat64_t) -> svfloat64x3_t; + } + unsafe { _svset3_f64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv48i8.nxv16i8" + )] + fn _svset3_s8(tuple: svint8x3_t, imm_index: i32, x: svint8_t) -> svint8x3_t; + } + unsafe { _svset3_s8(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv24i16.nxv8i16" + )] + fn _svset3_s16(tuple: svint16x3_t, imm_index: i32, x: svint16_t) -> svint16x3_t; + } + unsafe { _svset3_s16(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv12i32.nxv4i32" + )] + fn _svset3_s32(tuple: svint32x3_t, imm_index: i32, x: svint32_t) -> svint32x3_t; + } + unsafe { _svset3_s32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv6i64.nxv2i64" + )] + fn _svset3_s64(tuple: svint64x3_t, imm_index: i32, x: svint64_t) -> svint64x3_t; + } + unsafe { _svset3_s64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe {
svset3_s8::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t, x: svuint16_t) -> svuint16x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe { svset3_s16::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe { svset3_s32::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t { + static_assert_range!(IMM_INDEX, 0, 2); + unsafe { svset3_s64::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv16f32.nxv4f32" + )] + fn _svset4_f32(tuple: svfloat32x4_t, imm_index: i32, x: svfloat32_t) -> svfloat32x4_t; + } + unsafe { _svset4_f32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv8f64.nxv2f64" + )] + fn _svset4_f64(tuple: svfloat64x4_t, imm_index: i32, x: svfloat64_t) -> svfloat64x4_t; + } + unsafe { _svset4_f64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv64i8.nxv16i8" + )] + fn _svset4_s8(tuple: svint8x4_t, imm_index: i32, x: svint8_t) -> svint8x4_t; + } + unsafe { _svset4_s8(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t, x: svint16_t) -> svint16x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( +
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv32i16.nxv8i16" + )] + fn _svset4_s16(tuple: svint16x4_t, imm_index: i32, x: svint16_t) -> svint16x4_t; + } + unsafe { _svset4_s16(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv16i32.nxv4i32" + )] + fn _svset4_s32(tuple: svint32x4_t, imm_index: i32, x: svint32_t) -> svint32x4_t; + } + unsafe { _svset4_s32(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.tuple.set.nxv8i64.nxv2i64" + )] + fn _svset4_s64(tuple: svint64x4_t, imm_index: i32, x: svint64_t) -> svint64x4_t; + } + unsafe { _svset4_s64(tuple, IMM_INDEX, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s8::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s16::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s32::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +pub fn svset4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svset4_s64::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() } +} +#[doc = "Initialize the first-fault register to all-true"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(setffr))] +pub fn svsetffr() { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.setffr")] + fn _svsetffr(); + } + unsafe {
_svsetffr() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4f32")] + fn _svsplice_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsplice_f32(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2f64")] + fn _svsplice_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsplice_f64(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv16i8")] + fn _svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsplice_s8(pg, op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv8i16")] + fn _svsplice_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsplice_s16(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4i32")] + fn _svsplice_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsplice_s32(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2i64")] + fn _svsplice_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> 
svint64_t; + } + unsafe { _svsplice_s64(pg.into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsplice_s8(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsplice_s16(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsplice_s32(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsplice_s64(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv4f32")] + fn _svsqrt_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsqrt_f32_m(inactive, pg.into(), op) } +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv2f64")] + fn 
_svsqrt_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsqrt_f64_m(inactive, pg.into(), op) } +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] + fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst1_f32(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] + fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst1_f64(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst1_s8(data, pg, base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst1_s16(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst1_s32(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst1_s64(data, pg.into(), base) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svst1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1h))] +pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svst1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svst1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svst1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32" + )] + fn _svst1_scatter_s32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_f32(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32" + )] + fn _svst1_scatter_s32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_s32(data, pg.into(), base, indices) +} 
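(Illustrative aside, not part of the patch: a minimal sketch of driving the predicated store above together with the rounding intrinsics defined earlier in this file. `svptrue_b32` is assumed to be provided elsewhere in this module, mirroring the C ACLE; it is not defined in this hunk.)

#[target_feature(enable = "sve")]
unsafe fn store_rounded_down(dst: *mut f32, v: svfloat32_t) {
    // All-true predicate; assumed to be defined elsewhere in this module.
    let pg = svptrue_b32();
    // Round every active lane towards -inf ("_x": inactive lanes are unspecified)...
    let rounded = svrintm_f32_x(pg, v);
    // ...then write one full vector through the predicated, non-truncating store
    // defined above. The caller must guarantee `dst` is valid for that many writes.
    svst1_f32(pg, dst, rounded);
}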
+#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64" + )] + fn _svst1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_f64(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64" + )] + fn _svst1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_s64(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), 
indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32" + )] + fn _svst1_scatter_u32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_f32(data, pg.into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32" + )] + fn _svst1_scatter_u32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_s32(data, pg.into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: 
svfloat64_t, +) { + svst1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32" + )] + fn _svst1_scatter_s32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_f32(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32" + )] + fn _svst1_scatter_s32offset_s32( + data: svint32_t, + pg: 
svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_s32(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2f64" + )] + fn _svst1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_f64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i64" + )] + fn _svst1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_s64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_u64( + pg: 
svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32" + )] + fn _svst1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_f32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32" + )] + fn _svst1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_s32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svst1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + 
svst1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svst1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_u64(pg: 
svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svst1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is 
similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svst1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_f32(data, pg.into(), bases.as_signed(), offset) +} 
+#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_s32(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_f64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_s64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svst1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svst1_f64(pg, base.offset(svcntd() as isize * vnum 
as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svst1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svst1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svst1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svst1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { 
+ svst1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svst1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svst1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svst1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i8")] + fn _svst1b_s16(data: nxv8i8, pg: svbool8_t, ptr: *mut i8); + } + _svst1b_s16(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i8")] + fn _svst1b_s32(data: nxv4i8, pg: svbool4_t, ptr: *mut i8); + } + _svst1b_s32(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i16")] + fn _svst1h_s32(data: nxv4i16, pg: svbool4_t, ptr: *mut i16); + } + _svst1h_s32(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i8")] + fn _svst1b_s64(data: nxv2i8, pg: svbool2_t, ptr: *mut i8); + } + _svst1b_s64(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i16")] + fn _svst1h_s64(data: nxv2i16, pg: svbool2_t, ptr: *mut i16); + } + _svst1h_s64(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1w))] +pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i32")] + fn _svst1w_s64(data: nxv2i32, pg: svbool2_t, ptr: *mut i32); + } + _svst1w_s64(simd_cast(data), pg.into(), base) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) { + svst1b_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) { + svst1b_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) { + svst1h_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) { + svst1b_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) { + svst1h_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) { + svst1w_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8" + )] + fn _svst1b_scatter_s32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_s32offset_s32(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16" + )] + fn _svst1h_scatter_s32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_s32offset_s32(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] 
+#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svint32_t, + data: svuint32_t, +) { + svst1b_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i8" + )] + fn _svst1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svst1b_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i16" + )] + fn _svst1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svst1h_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i32" + )] + fn _svst1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svst1w_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn 
svst1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8" + )] + fn _svst1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16" + )] + fn _svst1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svst1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1b_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svst1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1h_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for 
the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svst1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1b_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] 
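// --- Editor's aside (illustrative sketch, not part of this patch) ---
// The `_u32base`/`_u64base` scatter stores added in this hunk all follow the
// same shape: `pg` selects the active lanes, each lane of `bases` supplies a
// per-lane address (with no provenance, per the safety notes above), a scalar
// `offset` is added to every lane, and `data` is truncated before the store.
// A minimal usage sketch under those assumptions; `svst1b_scatter_u32base_offset_s32`
// is the wrapper defined just above, and building `bases` with an ACLE helper
// such as `svdup_n_u32`/`svindex_u32` is assumed, not shown in this hunk.
#[target_feature(enable = "sve")]
unsafe fn store_low_bytes(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
    // Each active lane writes the low 8 bits of the matching `data` lane to
    // the address in the matching `bases` lane (scalar offset of 0 here).
    svst1b_scatter_u32base_offset_s32(pg, bases, 0, data);
}
// --- End of editor's aside; the patch hunk continues below. ---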
+#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svst1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1h_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svst1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1w_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` 
lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1w_scatter_u64base_offset_s64(pg, bases, 0, data) } -#[doc = "Reciprocal exponent"] +#[doc = "Truncate to 8 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpx))] -pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frecpx.x.nxv2f64" - )] - fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; - } - unsafe { _svrecpx_f64_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1b_scatter_u64base_offset_u64(pg, bases, 0, data) } -#[doc = "Reciprocal exponent"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpx))] -pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - svrecpx_f64_m(op, pg, op) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1h_scatter_u64base_offset_u64(pg, bases, 0, data) } -#[doc = "Reciprocal exponent"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frecpx))] -pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - svrecpx_f64_m(svdup_n_f64(0.0), pg, op) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1w_scatter_u64base_offset_u64(pg, bases, 0, data) } -#[doc = "Reciprocal square root estimate"] +#[doc = "Truncate to 8 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frsqrte))] -pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32" - )] - fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t; - } - unsafe { _svrsqrte_f32(op) } +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svint16_t) { + svst1b_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Reciprocal square root estimate"] +#[doc = "Truncate to 8 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frsqrte))] -pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64" - )] - fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t; - } - unsafe { _svrsqrte_f64(op) } +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svint32_t) { + svst1b_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Reciprocal square root step"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frsqrts))] -pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32" - )] - fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svrsqrts_f32(op1, op2) } +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svint32_t) { + svst1h_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Reciprocal square root step"] +#[doc = "Truncate to 8 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(frsqrts))] -pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64" - )] - fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svrsqrts_f64(op1, op2) } +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svint64_t) { + svst1b_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] - fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svorr_s8_m(pg, op1, op2) } +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svint64_t) { + svst1h_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svint64_t) { + svst1w_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 8 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svorr_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint16_t) { + svst1b_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 8 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint32_t) { + svst1b_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint32_t) { + svst1h_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 8 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svorr_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint64_t) { + svst1b_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] - fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svorr_s16_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint64_t) { + svst1h_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint64_t) { + svst1w_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svorr_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16" + )] + fn _svst1h_scatter_s32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_s32index_s32(simd_cast(data), pg.into(), base, indices) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i16" + )] + fn _svst1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svst1h_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svorr_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i32" + )] + fn _svst1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svst1w_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.orr.nxv4i32")] - fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svorr_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svorr_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16" + )] + fn _svst1h_scatter_u32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_u32index_s32(simd_cast(data), pg.into(), base, indices.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svorr_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] 
#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] - fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; - } - unsafe { _svorr_s64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svorr_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svorr_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_u32])"] 
+#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svorr_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svorr_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_s64( + pg: svbool_t, + 
bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 16 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Truncate to 32 bits and store"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svorr_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svorr_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4f32")] + fn _svst2_f32(data0: svfloat32_t, data1: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + pg.into(), + base, + ) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svorr_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2f64")] + fn _svst2_f64(data0: svfloat64_t, data1: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + pg.into(), + base, + ) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv16i8")] + fn _svst2_s8(data0: svint8_t, data1: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst2_s8(svget2_s8::<0>(data), svget2_s8::<1>(data), pg, base) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " 
* [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svorr_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv8i16")] + fn _svst2_s16(data0: svint16_t, data1: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + pg.into(), + base, + ) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4i32")] + fn _svst2_s32(data0: svint32_t, data1: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + pg.into(), + base, + ) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2i64")] + fn _svst2_s64(data0: svint64_t, data1: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + pg.into(), + 
base, + ) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svorr_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) { + svst2_s8(pg, base.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) { + svst2_s16(pg, base.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) { + svst2_s32(pg, base.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svorr_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) { + svst2_s64(pg, base.as_signed(), data.as_signed()) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x2_t) { + svst2_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x2_t) { + svst2_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svorr_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x2_t) { + svst2_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x2_t) { + svst2_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x2_t) { + svst2_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by 
`pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svorr_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x2_t) { + svst2_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x2_t) { + svst2_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x2_t) { + svst2_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_x(pg: 
svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svorr_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x2_t) { + svst2_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store two vectors into two-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x2_t) { + svst2_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4f32")] + fn _svst3_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst3_f32( + svget3_f32::<0>(data), + svget3_f32::<1>(data), + svget3_f32::<2>(data), + pg.into(), + base, + ) } -#[doc = "Bitwise inclusive OR"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(orr))] -pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svorr_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: svfloat64x3_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2f64")] + fn _svst3_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst3_f64( + svget3_f64::<0>(data), + svget3_f64::<1>(data), + svget3_f64::<2>(data), + pg.into(), + base, + ) } -#[doc = "Set predicate elements to true"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b8() -> svbool_t { +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] - fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv16i8")] + fn _svst3_s8(data0: svint8_t, data1: svint8_t, data2: svint8_t, pg: svbool_t, ptr: *mut i8); } - unsafe { _svptrue_pat_b8(PATTERN) } + _svst3_s8( + svget3_s8::<0>(data), + svget3_s8::<1>(data), + svget3_s8::<2>(data), + pg, + base, + ) } -#[doc = "Set predicate elements to true"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b16() -> svbool_t { +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] - fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv8i16")] + fn _svst3_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); } - unsafe { simd_cast(_svptrue_pat_b16(PATTERN)) } + _svst3_s16( + svget3_s16::<0>(data), + 
svget3_s16::<1>(data), + svget3_s16::<2>(data), + pg.into(), + base, + ) } -#[doc = "Set predicate elements to true"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b32() -> svbool_t { +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] - fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4i32")] + fn _svst3_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); } - unsafe { simd_cast(_svptrue_pat_b32(PATTERN)) } + _svst3_s32( + svget3_s32::<0>(data), + svget3_s32::<1>(data), + svget3_s32::<2>(data), + pg.into(), + base, + ) } -#[doc = "Set predicate elements to true"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] -pub fn svptrue_pat_b64() -> svbool_t { +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] - fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2i64")] + fn _svst3_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); } - unsafe { simd_cast(_svptrue_pat_b64(PATTERN)) } + _svst3_s64( + svget3_s64::<0>(data), + svget3_s64::<1>(data), + svget3_s64::<2>(data), + pg.into(), + base, + ) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) { + svst3_s8(pg, base.as_signed(), data.as_signed()) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) { + svst3_s16(pg, base.as_signed(), data.as_signed()) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) { + svst3_s32(pg, base.as_signed(), data.as_signed()) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_u64(pg: 
svbool_t, base: *mut u64, data: svuint64x3_t) { + svst3_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x3_t) { + svst3_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x3_t) { + svst3_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x3_t) { + svst3_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x3_t) { + svst3_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x3_t) { + svst3_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x3_t) { + svst3_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { simd_select(simd_cast::<_, svbool_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x3_t) { + svst3_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { simd_select(simd_cast::<_, svbool2_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x3_t) { + svst3_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - unsafe { simd_select(simd_cast::<_, svbool4_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x3_t) { + svst3_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Conditionally select elements"] +#[doc = "Store three vectors into three-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sel))] -pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { simd_select(simd_cast::<_, svbool8_t>(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x3_t) { + svst3_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f32])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] - fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4f32")] + fn _svst4_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + data3: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); } - _svst1_f32(data, simd_cast(pg), base) + _svst4_f32( + svget4_f32::<0>(data), + svget4_f32::<1>(data), + svget4_f32::<2>(data), + svget4_f32::<3>(data), + pg.into(), + base, + ) } -#[doc = "Non-truncating 
store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f64])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] - fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2f64")] + fn _svst4_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + data3: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); } - _svst1_f64(data, simd_cast(pg), base) + _svst4_f64( + svget4_f64::<0>(data), + svget4_f64::<1>(data), + svget4_f64::<2>(data), + svget4_f64::<3>(data), + pg.into(), + base, + ) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s8])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] - fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv16i8")] + fn _svst4_s8( + data0: svint8_t, + data1: svint8_t, + data2: svint8_t, + data3: svint8_t, + pg: svbool_t, + ptr: *mut i8, + ); } - _svst1_s8(data, pg, base) + _svst4_s8( + svget4_s8::<0>(data), + svget4_s8::<1>(data), + svget4_s8::<2>(data), + svget4_s8::<3>(data), + pg, + base, + ) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s16])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc 
= " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] - fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv8i16")] + fn _svst4_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + data3: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); } - _svst1_s16(data, simd_cast(pg), base) + _svst4_s16( + svget4_s16::<0>(data), + svget4_s16::<1>(data), + svget4_s16::<2>(data), + svget4_s16::<3>(data), + pg.into(), + base, + ) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s32])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] - fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4i32")] + fn _svst4_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + data3: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); } - _svst1_s32(data, simd_cast(pg), base) + _svst4_s32( + svget4_s32::<0>(data), + svget4_s32::<1>(data), + svget4_s32::<2>(data), + svget4_s32::<3>(data), + pg.into(), + base, + ) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s64])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) { unsafe extern "C" { - 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] - fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2i64")] + fn _svst4_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + data3: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); } - _svst1_s64(data, simd_cast(pg), base) + _svst4_s64( + svget4_s64::<0>(data), + svget4_s64::<1>(data), + svget4_s64::<2>(data), + svget4_s64::<3>(data), + pg.into(), + base, + ) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u8])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { - svst1_s8(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) { + svst4_s8(pg, base.as_signed(), data.as_signed()) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u16])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { - svst1_s16(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) { + svst4_s16(pg, base.as_signed(), data.as_signed()) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u32])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { - 
svst1_s32(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) { + svst4_s32(pg, base.as_signed(), data.as_signed()) } -#[doc = "Non-truncating store"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u64])"] #[doc = ""] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { - svst1_s64(pg, base.as_signed(), data.as_signed()) +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) { + svst4_s64(pg, base.as_signed(), data.as_signed()) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] - fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svsub_f32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x4_t) { + svst4_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x4_t) { + svst4_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsub_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x4_t) { + svst4_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x4_t) { + svst4_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x4_t) { + svst4_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsub_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x4_t) { + svst4_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] - fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svsub_f64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x4_t) { + svst4_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x4_t) { + svst4_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsub_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x4_t) { + svst4_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Store four vectors into four-element tuples"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x4_t) { + svst4_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4f32")] + fn _svstnt1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svstnt1_f32(data, pg.into(), base) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsub))] -pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsub_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.stnt1.nxv2f64")] + fn _svstnt1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svstnt1_f64(data, pg.into(), base) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] - fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv16i8")] + fn _svstnt1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); } - unsafe { _svsub_s8_m(pg, op1, op2) } + _svstnt1_s8(data, pg, base) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv8i16")] + fn _svstnt1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svstnt1_s16(data, pg.into(), base) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s32])"] +#[doc = ""] +#[doc = 
"## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svsub_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4i32")] + fn _svstnt1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svstnt1_s32(data, pg.into(), base) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2i64")] + fn _svstnt1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svstnt1_s64(data, pg.into(), base) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", 
issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svstnt1_s8(pg, base.as_signed(), data.as_signed()) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsub_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svstnt1_s16(pg, base.as_signed(), data.as_signed()) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] - fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svsub_s16_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svstnt1_s32(pg, base.as_signed(), data.as_signed()) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svstnt1_s64(pg, base.as_signed(), data.as_signed()) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svsub_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svstnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, 
data: svfloat64_t) { + svstnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svstnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsub_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svstnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required 
for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] - fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svsub_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svstnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svstnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsub_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svstnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating 
store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svstnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svstnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) } -#[doc = "Subtract"] +#[doc = "Non-truncating store, non-temporal"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsub_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svstnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] - fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] + fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { _svsub_s64_m(simd_cast(pg), op1, op2) } + unsafe { _svsub_f32_m(pg.into(), op1, op2) } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_m(pg, op1, svdup_n_f32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsub_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_x(pg, 
op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_x(pg, op1, svdup_n_f32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsub_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_z(pg, op1, svdup_n_f32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] + fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsub_f64_m(pg.into(), op1, op2) } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] 
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsub_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsub_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsub_s8_m(pg, op1, op2) } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_m(pg, op1, svdup_n_u16(op2)) +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_m(pg, op1, svdup_n_s8(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsub_u16_m(pg, op1, op2) +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_x(pg, op1, svdup_n_u16(op2)) +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsub_u16_z(pg, op1, svdup_n_u16(op2)) +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: 
svuint32_t) -> svuint32_t { - unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsub_s16_m(pg.into(), op1, op2) } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_m(pg, op1, svdup_n_u32(op2)) +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsub_u32_m(pg, op1, op2) +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsub_u32_x(pg, op1, svdup_n_u32(op2)) +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - 
svsub_u32_z(pg, op1, svdup_n_u32(op2)) +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(pg.into(), op1, op2) } } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_m(pg, op1, svdup_n_u64(op2)) +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsub_u64_m(pg, op1, op2) +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_x(pg, op1, svdup_n_u64(op2)) +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, svdup_n_s32(op2)) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +pub fn 
svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } #[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[cfg_attr(test, assert_instr(sub))] -pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsub_u64_z(pg, op1, svdup_n_u64(op2)) +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] - fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svabd_f32_m(simd_cast(pg), op1, op2) } + unsafe { _svsub_s64_m(pg.into(), op1, op2) } } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svabd_f32_m(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svabd_f32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svabd_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svabd_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] - fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; - } - unsafe { _svabd_f64_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svabd_f64_m(pg, op1, 
svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svabd_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svabd_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabd))] -pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svabd_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn 
svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] - fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; - } - unsafe { _svabd_s8_m(pg, op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svabd_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svabd_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svabd_s8_x(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svabd_s8_z(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] - fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; - } - unsafe { _svabd_s16_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svabd_s16_m(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svabd_s16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> 
svint16_t { - svabd_s16_x(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { - svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svabd_s16_z(pg, op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] - fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svabd_s32_m(simd_cast(pg), op1, op2) } +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svabd_s32_m(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svabd_s32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svabd_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svabd_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] - fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")] + fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { _svabd_s64_m(simd_cast(pg), op1, op2) } + unsafe { _svsubr_f32_m(pg.into(), op1, op2) } } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svabd_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_m(pg, op1, svdup_n_f32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svabd_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svabd_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_x(pg, op1, svdup_n_f32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sabd))] -pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svabd_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_z(pg, op1, svdup_n_f32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] - fn _svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")] + fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { _svabd_u8_m(pg, op1, op2) } + unsafe { _svsubr_f64_m(pg.into(), op1, op2) } } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svabd_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_m(pg, op1, svdup_n_f64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svabd_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] #[inline] 
#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svabd_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_x(pg, op1, svdup_n_f64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svabd_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_z(pg, op1, svdup_n_f64(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] - fn _svabd_u16_m(pg: svbool8_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")] + fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svabd_u16_m(simd_cast(pg), op1, op2) } + unsafe { _svsubr_s8_m(pg, op1, op2) } } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> 
svuint16_t { - svabd_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_m(pg, op1, svdup_n_s8(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svabd_u16_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svabd_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_x(pg, op1, svdup_n_s8(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svabd_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_z(pg, op1, svdup_n_s8(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", 
issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] - fn _svabd_u32_m(pg: svbool4_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")] + fn _svsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svabd_u32_m(simd_cast(pg), op1, op2) } + unsafe { _svsubr_s16_m(pg.into(), op1, op2) } } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svabd_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_m(pg, op1, svdup_n_s16(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svabd_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svabd_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_x(pg, op1, svdup_n_s16(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(subr))] +pub fn 
svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svabd_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_z(pg, op1, svdup_n_s16(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] - fn _svabd_u64_m(pg: svbool2_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")] + fn _svsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { _svabd_u64_m(simd_cast(pg), op1, op2) } + unsafe { _svsubr_s32_m(pg.into(), op1, op2) } } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svabd_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_m(pg, op1, svdup_n_s32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svabd_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, op1, op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svabd_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_x(pg, op1, svdup_n_s32(op2)) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) } -#[doc = "Absolute difference"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uabd))] -pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svabd_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_z(pg, op1, svdup_n_s32(op2)) } - -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f32_m(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] - fn _svabs_f32_m(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")] + fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svabs_f32_m(simd_cast(pg), op) } + unsafe { _svsubr_s64_m(pg.into(), op1, op2) } } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] #[inline] 
#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - svabs_f32_m(pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_m(pg, op1, svdup_n_s64(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { - svabs_f32_m(pg, svsel_f32(pg, op, svdup_n_f32(0.0))) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, op1, op2) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f64_m(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] - fn _svabs_f64_m(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; - } - unsafe { _svabs_f64_m(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_x(pg, op1, svdup_n_s64(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - svabs_f64_m(pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fabs))] -pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { - svabs_f64_m(pg, svsel_f64(pg, op, svdup_n_f64(0.0))) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_z(pg, op1, svdup_n_s64(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] - fn _svabs_s8_m(pg: svbool_t, op: svint8_t) -> svint8_t; - } - unsafe { _svabs_s8_m(pg, op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsubr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { - svabs_s8_m(pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_m(pg, op1, svdup_n_u8(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { - svabs_s8_m(pg, svsel_s8(pg, op, svdup_n_s8(0))) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, op1, op2) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s16_m(pg: svbool_t, op: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] - fn _svabs_s16_m(pg: svbool8_t, op: svint16_t) -> svint16_t; - } - unsafe { _svabs_s16_m(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_x(pg, op1, svdup_n_u8(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn 
svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { - svabs_s16_m(pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { - svabs_s16_m(pg, svsel_s16(pg, op, svdup_n_s16(0))) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_z(pg, op1, svdup_n_u8(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s32_m(pg: svbool_t, op: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] - fn _svabs_s32_m(pg: svbool4_t, op: svint32_t) -> svint32_t; - } - unsafe { _svabs_s32_m(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsubr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { - svabs_s32_m(pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_m(pg, op1, svdup_n_u16(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { - svabs_s32_m(pg, svsel_s32(pg, op, svdup_n_s32(0))) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, op1, op2) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s64_m(pg: svbool_t, op: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] - fn _svabs_s64_m(pg: svbool2_t, op: svint64_t) -> svint64_t; - } - unsafe { _svabs_s64_m(simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_x(pg, op1, svdup_n_u16(op2)) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { - svabs_s64_m(pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) } -#[doc = "Absolute value"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(abs))] -pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { - svabs_s64_m(pg, svsel_s64(pg, op, svdup_n_s64(0))) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_z(pg, op1, svdup_n_u16(op2)) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] - fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; - } - unsafe { _svcnot_s8_m(inactive, pg, op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsubr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { - svcnot_s8_m(op, pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_m(pg, op1, svdup_n_u32(op2)) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { - svcnot_s8_m(svdup_n_s8(0), pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, op1, op2) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] - fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; - } - unsafe { _svcnot_s16_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_x(pg, op1, svdup_n_u32(op2)) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { - svcnot_s16_m(op, pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { - svcnot_s16_m(svdup_n_s16(0), pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_z(pg, op1, svdup_n_u32(op2)) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] - fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; - } - unsafe { _svcnot_s32_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsubr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { - svcnot_s32_m(op, pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_m(pg, op1, svdup_n_u64(op2)) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { - svcnot_s32_m(svdup_n_s32(0), pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, op1, op2) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] - fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; - } - unsafe { _svcnot_s64_m(inactive, simd_cast(pg), op) } +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_x(pg, op1, svdup_n_u64(op2)) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { - svcnot_s64_m(op, pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Subtract reversed"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { - svcnot_s64_m(svdup_n_s64(0), pg, op) +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))] +pub fn svsudot_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint8_t, + op3: svuint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sudot.lane.nxv4i32" + )] + fn _svsudot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svsudot_lane_s32(op1, op2, op3.as_signed(), IMM_INDEX) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Dot product (signed × unsigned)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { - unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t { + svusdot_s32(op1, op3, op2) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Dot product (signed × unsigned)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { - svcnot_u8_m(op, pg, op) +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t { + svsudot_s32(op1, 
op2, svdup_n_u8(op3)) } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { - svcnot_u8_m(svdup_n_u8(0), pg, op) +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4f32")] + fn _svtbl_f32(data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbl_f32(data, indices.as_signed()) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { - unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2f64")] + fn _svtbl_f64(data: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { _svtbl_f64(data, indices.as_signed()) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { - svcnot_u16_m(op, pg, op) +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv16i8")] + fn _svtbl_s8(data: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { _svtbl_s8(data, indices.as_signed()) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { - svcnot_u16_m(svdup_n_u16(0), pg, op) +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv8i16")] + fn _svtbl_s16(data: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { _svtbl_s16(data, indices.as_signed()) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { - unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4i32")] + fn _svtbl_s32(data: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svtbl_s32(data, indices.as_signed()) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { - svcnot_u32_m(op, pg, op) +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s64(data: svint64_t, indices: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2i64")] + fn _svtbl_s64(data: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svtbl_s64(data, indices.as_signed()) } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { - svcnot_u32_m(svdup_n_u32(0), pg, op) +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbl_s8(data.as_signed(), indices).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { - unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u16(data: 
svuint16_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbl_s16(data.as_signed(), indices).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { - svcnot_u64_m(op, pg, op) +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbl_s32(data.as_signed(), indices).as_unsigned() } } -#[doc = "Conditional bitwise NOT"] +#[doc = "Table lookup in single-vector table"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnot))] -pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { - svcnot_u64_m(svdup_n_u64(0), pg, op) +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbl_s64(data.as_signed(), indices).as_unsigned() } } -// ============================================================================ -// Batch 3: Reduction/Horizontal Operations -// ============================================================================ -#[doc = "Add across vector"] +#[doc = "Trigonometric multiply-add coefficient"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { +#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] +pub fn svtmad_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM3, 0, 7); unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv16i8")] - fn _svaddv_s8(pg: svbool8_t, op: svint8_t) -> i64; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftmad.x.nxv4f32" + )] + fn _svtmad_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t; } - unsafe { _svaddv_s8(simd_cast(pg), op) } + unsafe { _svtmad_f32(op1, op2, IMM3) } } -#[doc = "Add across vector"] +#[doc = "Trigonometric multiply-add coefficient"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { +#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] +pub fn svtmad_f64<const IMM3: i32>(op1: svfloat64_t, op2: 
svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM3, 0, 7); unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv8i16")] - fn _svaddv_s16(pg: svbool4_t, op: svint16_t) -> i64; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftmad.x.nxv2f64" + )] + fn _svtmad_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t; } - unsafe { _svaddv_s16(simd_cast(pg), op) } + unsafe { _svtmad_f64(op1, op2, IMM3) } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv4i32")] - fn _svaddv_s32(pg: svbool2_t, op: svint32_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i1")] + fn _svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; } - unsafe { _svaddv_s32(simd_cast(pg), op) } + unsafe { _svtrn1_b8(op1, op2) } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addv.nxv2i64")] - fn _svaddv_s64(pg: svbool_t, op: svint64_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i1")] + fn _svtrn1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; } - unsafe { _svaddv_s64(pg, op) } + unsafe { _svtrn1_b16(op1.into(), op2.into()).into() } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { - unsafe { svaddv_s8(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i1")] + fn _svtrn1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn1_b32(op1.into(), op2.into()).into() } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { - unsafe { svaddv_s16(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i1")] + fn _svtrn1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn1_b64(op1.into(), op2.into()).into() } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { - unsafe { svaddv_s32(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4f32")] + fn _svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1_f32(op1, op2) } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(addv))] -pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { - unsafe { svaddv_s64(pg, op.as_signed()) as u64 } +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2f64")] + fn _svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn1_f64(op1, op2) } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(faddv))] -pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] - fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i8")] + fn _svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; } 
- unsafe { _svaddv_f32(simd_cast(pg), op) } + unsafe { _svtrn1_s8(op1, op2) } } -#[doc = "Add across vector"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(faddv))] -pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] - fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i16")] + fn _svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svaddv_f64(simd_cast(pg), op) } + unsafe { _svtrn1_s16(op1, op2) } } -#[doc = "Count active predicate elements"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)]"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntb))] -pub fn svcntb() -> i32 { +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] - fn _svcntb() -> i32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i32")] + fn _svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { _svcntb() } + unsafe { _svtrn1_s32(op1, op2) } } -#[doc = "Count active predicate elements"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)]"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cnth))] -pub fn svcnth() -> i32 { +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] - fn _svcnth() -> i32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i64")] + fn _svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svcnth() } + unsafe { _svtrn1_s64(op1, op2) } } -#[doc = "Count active predicate elements"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)]"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntd))] -pub fn svcntd() -> i32 { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.cntd")] - fn _svcntd() -> i32; - } - unsafe { _svcntd() } +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Count active predicate elements"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] - fn _svcntp_b8(pg: svbool8_t, op: svbool8_t) -> u64; - } - unsafe { _svcntp_b8(simd_cast(pg), simd_cast(op)) } +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Count active predicate elements"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] - fn _svcntp_b16(pg: svbool4_t, op: svbool4_t) -> u64; - } - unsafe { _svcntp_b16(simd_cast(pg), simd_cast(op)) } +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Count active predicate elements"] +#[doc = "Interleave even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] - fn _svcntp_b32(pg: svbool2_t, op: svbool2_t) -> u64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4f32")] + fn _svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { 
_svcntp_b32(simd_cast(pg), simd_cast(op)) } + unsafe { _svtrn1q_f32(op1, op2) } } -#[doc = "Count active predicate elements"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp[_b64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cntp))] -pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] - fn _svcntp_b64(pg: svbool_t, op: svbool_t) -> u64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2f64")] + fn _svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { _svcntp_b64(pg, op) } + unsafe { _svtrn1q_f64(op1, op2) } } -#[doc = "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s8(pg: svbool_t, op: svint8_t) -> svint8_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] - fn _svclz_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv16i8")] + fn _svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svclz_s8(simd_cast(pg), op) } + unsafe { _svtrn1q_s8(op1, op2) } } -#[doc = "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s16(pg: svbool_t, op: svint16_t) -> svint16_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] - fn _svclz_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv8i16")] + fn _svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svclz_s16(simd_cast(pg), op) } + unsafe { _svtrn1q_s16(op1, op2) } } -#[doc = "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32])"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s32(pg: svbool_t, op: svint32_t) -> svint32_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] - fn _svclz_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4i32")] + fn _svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { _svclz_s32(simd_cast(pg), op) } + unsafe { _svtrn1q_s32(op1, op2) } } -#[doc = "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] - fn _svclz_s64(pg: svbool_t, op: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2i64")] + fn _svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svclz_s64(pg, op) } + unsafe { _svtrn1q_s64(op1, op2) } } -#[doc = "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { - unsafe { svclz_s8(pg, op.as_signed()).as_unsigned() } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { - unsafe { svclz_s16(pg, op.as_signed()).as_unsigned() } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc 
= "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { - unsafe { svclz_s32(pg, op.as_signed()).as_unsigned() } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Count leading zeros"] +#[doc = "Interleave even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(clz))] -pub fn svclz_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { - unsafe { svclz_s64(pg, op.as_signed()).as_unsigned() } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s8(pg: svbool_t, op: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")] - fn _svcls_s8(pg: svbool8_t, op: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i1")] + fn _svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; } - unsafe { _svcls_s8(simd_cast(pg), op) } + unsafe { _svtrn2_b8(op1, op2) } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s16(pg: svbool_t, op: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")] - fn _svcls_s16(pg: svbool4_t, op: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i1")] + fn _svtrn2_b16(op1: 
svbool8_t, op2: svbool8_t) -> svbool8_t; } - unsafe { _svcls_s16(simd_cast(pg), op) } + unsafe { _svtrn2_b16(op1.into(), op2.into()).into() } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s32(pg: svbool_t, op: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")] - fn _svcls_s32(pg: svbool2_t, op: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i1")] + fn _svtrn2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; } - unsafe { _svcls_s32(simd_cast(pg), op) } + unsafe { _svtrn2_b32(op1.into(), op2.into()).into() } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")] - fn _svcls_s64(pg: svbool_t, op: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i1")] + fn _svtrn2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; } - unsafe { _svcls_s64(pg, op) } + unsafe { _svtrn2_b64(op1.into(), op2.into()).into() } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u8(pg: svbool_t, op: svuint8_t) -> svuint8_t { - unsafe { svcls_s8(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4f32")] + fn _svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2_f32(op1, op2) } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u16(pg: svbool_t, op: svuint16_t) -> svuint16_t { - unsafe { svcls_s16(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2f64")] + fn _svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2_f64(op1, op2) } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { - unsafe { svcls_s32(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i8")] + fn _svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2_s8(op1, op2) } } -#[doc = "Count leading sign bits"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(cls))] -pub fn svcls_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { - unsafe { svcls_s64(pg, op.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i16")] + fn _svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2_s16(op1, op2) } } - -// ============================================================================ -// 第4批:地址与加载/存储族 Intrinsics -// ============================================================================ - -// ---------------------------------------------------------------------------- -// svadr - 地址生成函数 -// ---------------------------------------------------------------------------- - -#[doc = "Address generation"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(adr))] -pub unsafe fn svadr_s32(pg: svbool_t, base: *const i8, offset: svint32_t) -> svuint64_t { +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adr.nxv4i32")] - fn _svadr_s32(pg: svbool4_t, base: *const i8, offset: svint32_t) -> 
svuint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i32")] + fn _svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; } - _svadr_s32(simd_cast(pg), base, offset) + unsafe { _svtrn2_s32(op1, op2) } } - -#[doc = "Address generation"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(adr))] -pub unsafe fn svadr_s64(pg: svbool_t, base: *const i8, offset: svint64_t) -> svuint64_t { +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adr.nxv2i64")] - fn _svadr_s64(pg: svbool2_t, base: *const i8, offset: svint64_t) -> svuint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i64")] + fn _svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - _svadr_s64(simd_cast(pg), base, offset) + unsafe { _svtrn2_s64(op1, op2) } } - -#[doc = "Address generation"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(adr))] -pub unsafe fn svadr_u32(pg: svbool_t, base: *const i8, offset: svuint32_t) -> svuint64_t { - unsafe { svadr_s32(pg, base, offset.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } - -#[doc = "Address generation"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadr[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(adr))] -pub unsafe fn svadr_u64(pg: svbool_t, base: *const i8, offset: svuint64_t) -> svuint64_t { - unsafe { svadr_s64(pg, base, offset.as_signed()).as_unsigned() } +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } - -// ---------------------------------------------------------------------------- -// svld1_vnum - 带向量索引的加载 -// ---------------------------------------------------------------------------- - -#[doc = "Unextended load (vector base + scalar offset)"] +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, 
assert_instr(trn2))] +pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] - fn _svld1_vnum_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4f32")] + fn _svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - let offset_base = base.add(vnum as usize * 4); - _svld1_vnum_f32(simd_cast(pg), offset_base) + unsafe { _svtrn2q_f32(op1, op2) } } - -#[doc = "Unextended load (vector base + scalar offset)"] +#[doc = "Interleave odd quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2f64")] + fn _svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2q_f64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.ld1.nxv2f64")] - fn _svld1_vnum_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv16i8")] + fn _svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; } - let offset_base = base.add(vnum as usize * 2); - _svld1_vnum_f64(simd_cast(pg), offset_base) + unsafe { _svtrn2q_s8(op1, op2) } } - -#[doc = "Unextended load (vector base + scalar offset)"] +#[doc = "Interleave odd quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv8i16")] + fn _svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2q_s16(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] - fn _svld1_vnum_s8(pg: svbool_t, base: *const i8) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4i32")] + fn _svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; } - let offset_base = base.add(vnum as usize * 16); - _svld1_vnum_s8(pg, offset_base) + unsafe { _svtrn2q_s32(op1, op2) } } - -#[doc = "Unextended load (vector base + scalar offset)"] +#[doc = "Interleave odd quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2i64")] + fn _svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2q_s64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Trigonometric starting value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"] +#[inline] +#[target_feature(enable = "sve")] +#[cfg_attr(test, assert_instr(ftsmul))] +pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] - fn _svld1_vnum_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv4f32" + )] + fn _svtsmul_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; } - let offset_base = base.add(vnum as usize * 8); - _svld1_vnum_s16(simd_cast(pg), offset_base) + unsafe { _svtsmul_f32(op1, op2.as_signed()) } } - -#[doc = "Unextended load (vector base + scalar offset)"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"] +#[doc = "Trigonometric starting value"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { +#[cfg_attr(test, 
assert_instr(ftsmul))] +pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] - fn _svld1_vnum_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv2f64" + )] + fn _svtsmul_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; } - let offset_base = base.add(vnum as usize * 4); - _svld1_vnum_s32(simd_cast(pg), offset_base) + unsafe { _svtsmul_f64(op1, op2.as_signed()) } } - -#[doc = "Unextended load (vector base + scalar offset)"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"] +#[doc = "Trigonometric select coefficient"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] - fn _svld1_vnum_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv4f32" + )] + fn _svtssel_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; } - let offset_base = base.add(vnum as usize * 2); - _svld1_vnum_s64(simd_cast(pg), offset_base) + unsafe { _svtssel_f32(op1, op2.as_signed()) } } - -#[doc = "Unextended load (vector base + scalar offset)"] -#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"] +#[doc = "Trigonometric select coefficient"] #[doc = ""] -#[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1b))] -pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { - svld1_vnum_s8(pg, base.as_signed(), vnum).as_unsigned() +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv2f64" + )] + fn _svtssel_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svtssel_f64(op1, op2.as_signed()) } } - -#[doc = "Unextended load (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1h))] -pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { - svld1_vnum_s16(pg, base.as_signed(), vnum).as_unsigned() +pub unsafe fn svundef2_f32() -> svfloat32x2_t { + simd_reinterpret(()) } - -#[doc = "Unextended load (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { - svld1_vnum_s32(pg, base.as_signed(), vnum).as_unsigned() +pub unsafe fn svundef2_f64() -> svfloat64x2_t { + simd_reinterpret(()) } - -#[doc = "Unextended load (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { - svld1_vnum_s64(pg, base.as_signed(), vnum).as_unsigned() +pub unsafe fn svundef2_s8() -> svint8x2_t { + simd_reinterpret(()) } - -// ---------------------------------------------------------------------------- -// svld1_gather - 聚集加载 -// ---------------------------------------------------------------------------- - -#[doc = "Gather load"] +#[doc = "Create an uninitialized tuple of two 
vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s32index]_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_gather_s32index_f32( - pg: svbool_t, - base: *const f32, - indices: svint32_t, -) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv4f32")] - fn _svld1_gather_s32index_f32(pg: svbool4_t, base: *const f32, indices: svint32_t) -> svfloat32_t; - } - _svld1_gather_s32index_f32(simd_cast(pg), base, indices) +pub unsafe fn svundef2_s16() -> svint16x2_t { + simd_reinterpret(()) } - -#[doc = "Gather load"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s64index]_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_gather_s64index_f64( - pg: svbool_t, - base: *const f64, - indices: svint64_t, -) -> svfloat64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64")] - fn _svld1_gather_s64index_f64(pg: svbool2_t, base: *const f64, indices: svint64_t) -> svfloat64_t; - } - _svld1_gather_s64index_f64(simd_cast(pg), base, indices) +pub unsafe fn svundef2_s32() -> svint32x2_t { + simd_reinterpret(()) } - -#[doc = "Gather load"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s32index]_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] 
-#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_gather_s32index_s32( - pg: svbool_t, - base: *const i32, - indices: svint32_t, -) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv4i32")] - fn _svld1_gather_s32index_s32(pg: svbool4_t, base: *const i32, indices: svint32_t) -> svint32_t; - } - _svld1_gather_s32index_s32(simd_cast(pg), base, indices) +pub unsafe fn svundef2_s64() -> svint64x2_t { + simd_reinterpret(()) } - -#[doc = "Gather load"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_s64index]_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_gather_s64index_s64( - pg: svbool_t, - base: *const i64, - indices: svint64_t, -) -> svint64_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64")] - fn _svld1_gather_s64index_s64(pg: svbool2_t, base: *const i64, indices: svint64_t) -> svint64_t; - } - _svld1_gather_s64index_s64(simd_cast(pg), base, indices) +pub unsafe fn svundef2_u8() -> svuint8x2_t { + simd_reinterpret(()) } - -#[doc = "Gather load"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32index]_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1w))] -pub unsafe fn svld1_gather_u32index_u32( - pg: svbool_t, - base: *const u32, - indices: svuint32_t, -) -> svuint32_t { - unsafe { - svld1_gather_s32index_s32(pg, base.as_signed(), indices.as_signed()).as_unsigned() - } +pub unsafe fn svundef2_u16() -> svuint16x2_t { + simd_reinterpret(()) } - -#[doc = "Gather load"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64index]_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(ld1d))] -pub unsafe fn svld1_gather_u64index_u64( - pg: svbool_t, - base: *const u64, - indices: svuint64_t, -) -> svuint64_t { - unsafe { - svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() - } +pub unsafe fn svundef2_u32() -> svuint32x2_t { + simd_reinterpret(()) } - -// ---------------------------------------------------------------------------- -// svst1_vnum - 带向量索引的存储 -// ---------------------------------------------------------------------------- - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of two vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] - fn _svst1_vnum_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); - } - let offset_base = base.add(vnum as usize * 4); - _svst1_vnum_f32(data, simd_cast(pg), offset_base) +pub unsafe fn svundef2_u64() -> svuint64x2_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] - fn _svst1_vnum_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); - } - let offset_base = base.add(vnum as usize * 2); - _svst1_vnum_f64(data, 
simd_cast(pg), offset_base) +pub unsafe fn svundef3_f32() -> svfloat32x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] - fn _svst1_vnum_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); - } - let offset_base = base.add(vnum as usize * 16); - _svst1_vnum_s8(data, pg, offset_base) +pub unsafe fn svundef3_f64() -> svfloat64x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] - fn _svst1_vnum_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); - } - let offset_base = base.add(vnum as usize * 8); - _svst1_vnum_s16(data, simd_cast(pg), offset_base) +pub unsafe fn svundef3_s8() -> svint8x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may 
be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] - fn _svst1_vnum_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); - } - let offset_base = base.add(vnum as usize * 4); - _svst1_vnum_s32(data, simd_cast(pg), offset_base) +pub unsafe fn svundef3_s16() -> svint16x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] - fn _svst1_vnum_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); - } - let offset_base = base.add(vnum as usize * 2); - _svst1_vnum_s64(data, simd_cast(pg), offset_base) +pub unsafe fn svundef3_s32() -> svint32x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1b))] -pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { - svst1_vnum_s8(pg, base.as_signed(), vnum, data.as_signed()) +pub unsafe fn svundef3_s64() -> svint64x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"] #[doc = ""] #[doc = "## Safety"] 
-#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1h))] -pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { - svst1_vnum_s16(pg, base.as_signed(), vnum, data.as_signed()) +pub unsafe fn svundef3_u8() -> svuint8x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { - svst1_vnum_s32(pg, base.as_signed(), vnum, data.as_signed()) +pub unsafe fn svundef3_u16() -> svuint16x3_t { + simd_reinterpret(()) } - -#[doc = "Unextended store (vector base + scalar offset)"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { - svst1_vnum_s64(pg, base.as_signed(), vnum, data.as_signed()) +pub unsafe fn svundef3_u32() -> svuint32x3_t { + simd_reinterpret(()) } - -// ---------------------------------------------------------------------------- -// svst1_scatter - 分散存储 -// ---------------------------------------------------------------------------- - -#[doc = "Scatter store"] +#[doc = "Create an uninitialized tuple of three vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s32index]_f32)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_scatter_s32index_f32( - pg: svbool_t, - base: *mut f32, - indices: svint32_t, - data: svfloat32_t, -) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv4f32")] - fn _svst1_scatter_s32index_f32(data: svfloat32_t, pg: svbool4_t, base: *mut f32, indices: svint32_t); - } - _svst1_scatter_s32index_f32(data, simd_cast(pg), base, indices) +pub unsafe fn svundef3_u64() -> svuint64x3_t { + simd_reinterpret(()) } - -#[doc = "Scatter store"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s64index]_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_scatter_s64index_f64( - pg: svbool_t, - base: *mut f64, - indices: svint64_t, - data: svfloat64_t, -) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64")] - fn _svst1_scatter_s64index_f64(data: svfloat64_t, pg: svbool2_t, base: *mut f64, indices: svint64_t); - } - _svst1_scatter_s64index_f64(data, simd_cast(pg), base, indices) +pub unsafe fn svundef4_f32() -> svfloat32x4_t { + simd_reinterpret(()) } - -#[doc = "Scatter store"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s32index]_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_scatter_s32index_s32( - pg: svbool_t, - base: *mut i32, - indices: svint32_t, - 
data: svint32_t, -) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv4i32")] - fn _svst1_scatter_s32index_s32(data: svint32_t, pg: svbool4_t, base: *mut i32, indices: svint32_t); - } - _svst1_scatter_s32index_s32(data, simd_cast(pg), base, indices) +pub unsafe fn svundef4_f64() -> svfloat64x4_t { + simd_reinterpret(()) } - -#[doc = "Scatter store"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_s64index]_s64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_scatter_s64index_s64( - pg: svbool_t, - base: *mut i64, - indices: svint64_t, - data: svint64_t, -) { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64")] - fn _svst1_scatter_s64index_s64(data: svint64_t, pg: svbool2_t, base: *mut i64, indices: svint64_t); - } - _svst1_scatter_s64index_s64(data, simd_cast(pg), base, indices) +pub unsafe fn svundef4_s8() -> svint8x4_t { + simd_reinterpret(()) } - -#[doc = "Scatter store"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32index]_u32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1w))] -pub unsafe fn svst1_scatter_u32index_u32( - pg: svbool_t, - base: *mut u32, - indices: svuint32_t, - data: svuint32_t, -) { - unsafe { - svst1_scatter_s32index_s32(pg, base.as_signed(), indices.as_signed(), data.as_signed()) - } +pub unsafe fn svundef4_s16() -> svint16x4_t { + simd_reinterpret(()) } - -#[doc = "Scatter store"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64index]_u64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"] #[doc = ""] #[doc = "## Safety"] -#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(st1d))] -pub unsafe fn svst1_scatter_u64index_u64( - pg: svbool_t, - base: *mut u64, - indices: svuint64_t, - data: svuint64_t, -) { - unsafe { - svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) - } +pub unsafe fn svundef4_s32() -> svint32x4_t { + simd_reinterpret(()) } - -// ============================================================================ -// Additional SVE intrinsics generated based on ARM documentation and test files -// ============================================================================ - -#[doc = "Add across vector"] +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda_f16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadda_f16(pg: svbool_t, initial: f16, op: svfloat16_t) -> f16 { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.fadda.nxv8f16" - )] - fn _svadda_f16(pg: svbool8_t, initial: f16, op: svfloat16_t) -> f16; - } - unsafe { _svadda_f16(simd_cast(pg), initial, op) } +pub unsafe fn svundef4_s64() -> svint64x4_t { + simd_reinterpret(()) } -#[doc = "Add across vector"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda_f32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.fadda.nxv4f32" - )] - fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32; - } - unsafe { _svadda_f32(simd_cast(pg), initial, op) } +pub unsafe fn svundef4_u8() -> svuint8x4_t { + simd_reinterpret(()) } -#[doc = "Add across vector"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda_f64)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - 
link_name = "llvm.aarch64.sve.fadda.nxv2f64" - )] - fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64; - } - unsafe { _svadda_f64(simd_cast(pg), initial, op) } +pub unsafe fn svundef4_u16() -> svuint16x4_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized tuple of four vectors"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u32base_s32offset)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrb.nxv4i32" - )] - fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t; - } - unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() } +pub unsafe fn svundef4_u32() -> svuint32x4_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u32base_u32offset)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t { - unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) } +pub unsafe fn svundef4_u64() -> svuint64x4_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u64base_s64offset)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrb.nxv2i64" - )] - fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t; - } - unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() } +pub unsafe fn svundef_f32() -> svfloat32_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb_u64base_u64offset)"] +#[doc = "## Safety"] +#[doc = " * This creates an 
uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t { - unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) } +pub unsafe fn svundef_f64() -> svfloat64_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u32base_s32index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrd.nxv4i32" - )] - fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; - } - unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +pub unsafe fn svundef_s8() -> svint8_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u32base_u32index)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { - unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) } +pub unsafe fn svundef_s16() -> svint16_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u64base_s64index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrd.nxv2i64" - )] - fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; - } - unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +pub unsafe fn svundef_s32() -> svint32_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd_u64base_u64index)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { - unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) } +pub unsafe fn svundef_s64() -> svint64_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u32base_s32index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrh.nxv4i32" - )] - fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; - } - unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +pub unsafe fn svundef_u8() -> svuint8_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u32base_u32index)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { - unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) } +pub unsafe fn svundef_u16() -> svuint16_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u64base_s64index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { - unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrh.nxv2i64" - )] - fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; - } - unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +pub unsafe fn svundef_u32() -> svuint32_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Create an uninitialized vector"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh_u64base_u64index)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { - unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) } +pub unsafe fn svundef_u64() -> svuint64_t { + simd_reinterpret(()) } -#[doc = "Address calculation"] +#[doc = "Dot product (unsigned × signed)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u32base_s32index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))] +pub fn svusdot_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svuint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); unsafe extern "C" { #[cfg_attr( target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrw.nxv4i32" + link_name = "llvm.aarch64.sve.usdot.lane.nxv4i32" )] - fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + fn _svusdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; } - unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() } + unsafe { _svusdot_lane_s32(op1, op2.as_signed(), op3, IMM_INDEX) } } -#[doc = "Address calculation"] +#[doc = "Dot product (unsigned × signed)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u32base_u32index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { - unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) } +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usdot.nxv4i32")] + fn _svusdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusdot_s32(op1, op2.as_signed(), op3) } } -#[doc = "Address calculation"] +#[doc = "Dot product (unsigned × signed)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u64base_s64index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { - unsafe extern "C" { - #[cfg_attr(
- target_arch = "aarch64", - link_name = "llvm.aarch64.sve.adrw.nxv2i64" - )] - fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; - } - unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t { + svusdot_s32(op1, op2, svdup_n_s8(op3)) } -#[doc = "Address calculation"] +#[doc = "Matrix multiply-accumulate (unsigned × signed)"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw_u64base_u64index)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { - unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) } +#[target_feature(enable = "sve,i8mm")] +#[cfg_attr(test, assert_instr(usmmla))] +pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usmmla.nxv4i32")] + fn _svusmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusmmla_s32(op1, op2.as_signed(), op3) } } -#[doc = "Compare equal (wide)"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide_s8)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8" - )] - fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i1")] + fn _svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; } - unsafe { _svcmpeq_wide_s8(pg, op1, op2) } + unsafe { _svuzp1_b8(op1, op2) } } -#[doc = "Compare equal (wide)"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide_s16)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16" - )] - fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i1")] + fn _svuzp1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; } - unsafe { 
simd_cast(_svcmpeq_wide_s16(simd_cast(pg), op1, op2)) } + unsafe { _svuzp1_b16(op1.into(), op2.into()).into() } } -#[doc = "Compare equal (wide)"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide_s32)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -pub unsafe fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr( - target_arch = "aarch64", - link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32" - )] - fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i1")] + fn _svuzp1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; } - unsafe { simd_cast(_svcmpeq_wide_s32(simd_cast(pg), op1, op2)) } + unsafe { _svuzp1_b32(op1.into(), op2.into()).into() } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8")] - fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i1")] + fn _svuzp1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; } - unsafe { _svqsub_s8(op1, op2) } + unsafe { _svuzp1_b64(op1.into(), op2.into()).into() } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { - svqsub_s8(op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4f32")] + fn _svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1_f32(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature 
= "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16")] - fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2f64")] + fn _svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { _svqsub_s16(op1, op2) } + unsafe { _svuzp1_f64(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { - svqsub_s16(op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i8")] + fn _svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1_s8(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32")] - fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i16")] + fn _svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svqsub_s32(op1, op2) } + unsafe { _svuzp1_s16(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { - svqsub_s32(op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i32")] + fn _svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1_s32(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] 
#[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64")] - fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i64")] + fn _svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svqsub_s64(op1, op2) } + unsafe { _svuzp1_s64(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqsub))] -pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { - svqsub_s64(op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8")] - fn _svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t; - } - unsafe { _svqsub_u8(op1, op2) } +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { - svqsub_u8(op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even elements from two inputs"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16")] - fn _svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4f32")] + fn _svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { _svqsub_u16(op1, op2) } + unsafe { _svuzp1q_f32(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { - svqsub_u16(op1, svdup_n_u16(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2f64")] + fn _svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1q_f64(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32")] - fn _svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv16i8")] + fn _svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svqsub_u32(op1, op2) } + unsafe { _svuzp1q_s8(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { - svqsub_u32(op1, svdup_n_u32(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv8i16")] + fn _svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1q_s16(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4i32")] + fn _svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1q_s32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64")] - fn _svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2i64")] + fn _svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svqsub_u64(op1, op2) } + unsafe { _svuzp1q_s64(op1, op2) } } -#[doc = "Saturating subtract"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqsub))] -pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { - svqsub_u64(op1, svdup_n_u64(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")] - fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; - } - unsafe { _svsubr_f32_m(simd_cast(pg), op1, op2) } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsubr_f32_m(pg, op1, svdup_n_f32(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate even quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsubr_f32_m(pg, op1, op2) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsubr_f32_x(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i1")] + fn _svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp2_b8(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { - svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i1")] + fn _svuzp2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp2_b16(op1.into(), op2.into()).into() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { - svsubr_f32_z(pg, op1, svdup_n_f32(op2)) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i1")] + fn _svuzp2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svuzp2_b32(op1.into(), op2.into()).into() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")] - fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i1")] + fn _svuzp2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; } - unsafe { _svsubr_f64_m(simd_cast(pg), op1, op2) } + unsafe { _svuzp2_b64(op1.into(), op2.into()).into() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsubr_f64_m(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f32(op1: 
svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4f32")] + fn _svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2_f32(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsubr_f64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2f64")] + fn _svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2_f64(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { - svsubr_f64_x(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i8")] + fn _svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp2_s8(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { - svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i16")] + fn _svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2_s16(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(fsubr))] -pub fn svsubr_n_f64_z(pg: svbool_t, op1: 
svfloat64_t, op2: f64) -> svfloat64_t { - svsubr_f64_z(pg, op1, svdup_n_f64(op2)) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i32")] + fn _svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2_s32(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")] - fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i64")] + fn _svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svsubr_s8_m(pg, op1, op2) } + unsafe { _svuzp2_s64(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsubr_s8_m(pg, op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { - svsubr_s8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd elements from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { - svsubr_s8_x(pg, 
op1, svdup_n_s8(op2))
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svuzp2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd elements from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"]
 #[inline]
 #[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(subr))]
-pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
-    svsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svuzp2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd quadwords from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"]
 #[inline]
-#[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(subr))]
-pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
-    svsubr_s8_z(pg, op1, svdup_n_s8(op2))
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4f32")]
+        fn _svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svuzp2q_f32(op1, op2) }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd quadwords from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"]
 #[inline]
-#[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(subr))]
-pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
     unsafe extern "C" {
-        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")]
-        fn _svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2f64")]
+        fn _svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
     }
-    unsafe { _svsubr_s16_m(pg, op1, op2) }
+    unsafe { _svuzp2q_f64(op1, op2) }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd quadwords from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"]
 #[inline]
-#[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(subr))]
-pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
-    svsubr_s16_m(pg, op1, svdup_n_s16(op2))
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv16i8")]
+        fn _svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svuzp2q_s8(op1, op2) }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd quadwords from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"]
 #[inline]
-#[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(subr))]
-pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
-    svsubr_s16_m(pg, op1, op2)
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv8i16")]
+        fn _svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svuzp2q_s16(op1, op2) }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd quadwords from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"]
 #[inline]
-#[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(subr))]
-pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
-    svsubr_s16_x(pg, op1, svdup_n_s16(op2))
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4i32")]
+        fn _svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svuzp2q_s32(op1, op2) }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd quadwords from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"]
 #[inline]
-#[target_feature(enable = "sve")]
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
-#[cfg_attr(test, assert_instr(subr))]
-pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
-    svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2i64")]
+        fn _svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svuzp2q_s64(op1, op2) }
 }
-#[doc = "Subtract reversed"]
+#[doc = "Concatenate odd quadwords from two inputs"]
 #[doc = ""]
-#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { - svsubr_s16_z(pg, op1, svdup_n_s16(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")] - fn _svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t; - } - unsafe { _svsubr_s32_m(pg, op1, op2) } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsubr_s32_m(pg, op1, svdup_n_s32(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Subtract reversed"] +#[doc = "Concatenate odd quadwords from two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsubr_s32_m(pg, 
op1, op2) +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i32" + )] + fn _svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_s32(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsubr_s32_x(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i32" + )] + fn _svwhilele_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_s32(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { - svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i32" + )] + fn _svwhilele_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_s32(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { - svsubr_s32_z(pg, op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i32" + )] + fn _svwhilele_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_s32(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"] #[inline] 
#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")] - fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i64" + )] + fn _svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t; } - unsafe { _svsubr_s64_m(simd_cast(pg), op1, op2) } + unsafe { _svwhilele_b8_s64(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsubr_s64_m(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i64" + )] + fn _svwhilele_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_s64(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsubr_s64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i64" + )] + fn _svwhilele_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_s64(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsubr_s64_x(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i64" + )] + fn _svwhilele_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { 
_svwhilele_b64_s64(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { - svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i32" + )] + fn _svwhilele_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_u32(op1.as_signed(), op2.as_signed()) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { - svsubr_s64_z(pg, op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i32" + )] + fn _svwhilele_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_u32(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - let op1_s: svint8_t = unsafe { core::mem::transmute(op1) }; - let op2_s: svint8_t = unsafe { core::mem::transmute(op2) }; - - let res_s: svint8_t = svsubr_s8_m(pg, op1_s, op2_s); - - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i32" + )] + fn _svwhilele_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_u32(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"] #[inline] #[target_feature(enable = "sve")] 
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsubr_u8_m(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i32" + )] + fn _svwhilele_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_u32(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsubr_u8_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i64" + )] + fn _svwhilele_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_u64(op1.as_signed(), op2.as_signed()) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsubr_u8_x(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i64" + )] + fn _svwhilele_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_u64(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { - svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i64" + )] + fn _svwhilele_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_u64(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less 
than or equal to"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { - svsubr_u8_z(pg, op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i64" + )] + fn _svwhilele_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_u64(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - let op1_s: svint16_t = unsafe { core::mem::transmute(op1) }; - let op2_s: svint16_t = unsafe { core::mem::transmute(op2) }; - let res_s: svint16_t = svsubr_s16_m(pg, op1_s, op2_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i32" + )] + fn _svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_s32(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsubr_u16_m(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i32" + )] + fn _svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s32(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsubr_u16_m(pg, op1, op2) 
+#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i32" + )] + fn _svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s32(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsubr_u16_x(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i32" + )] + fn _svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s32(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i64" + )] + fn _svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_s64(op1, op2) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { - svsubr_u16_z(pg, op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i64" + )] + fn _svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s64(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"] #[inline] #[target_feature(enable = "sve")] 
-#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - let op1_s: svint32_t = unsafe { core::mem::transmute(op1) }; - let op2_s: svint32_t = unsafe { core::mem::transmute(op2) }; - let res_s: svint32_t = svsubr_s32_m(pg, op1_s, op2_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i64" + )] + fn _svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s64(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsubr_u32_m(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i64" + )] + fn _svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s64(op1, op2).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsubr_u32_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i32" + )] + fn _svwhilelt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_u32(op1.as_signed(), op2.as_signed()) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsubr_u32_x(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i32" + )] + fn _svwhilelt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { 
_svwhilelt_b16_u32(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { - svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i32" + )] + fn _svwhilelt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u32(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { - svsubr_u32_z(pg, op1, svdup_n_u32(op2)) +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i32" + )] + fn _svwhilelt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u32(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - let op1_s: svint64_t = unsafe { core::mem::transmute(op1) }; - let op2_s: svint64_t = unsafe { core::mem::transmute(op2) }; - let res_s: svint64_t = svsubr_s64_m(pg, op1_s, op2_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i64" + )] + fn _svwhilelt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_u64(op1.as_signed(), op2.as_signed()) } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsubr_u64_m(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i64" + )] + fn _svwhilelt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u64(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsubr_u64_m(pg, op1, op2) +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i64" + )] + fn _svwhilelt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u64(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "While incrementing scalar is less than"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsubr_u64_x(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i64" + )] + fn _svwhilelt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u64(op1.as_signed(), op2.as_signed()).into() } } -#[doc = "Subtract reversed"] +#[doc = "Write to the first-fault register"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { - svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +#[cfg_attr(test, assert_instr(wrffr))] +pub fn svwrffr(op: svbool_t) { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.wrffr")] + fn _svwrffr(op: svbool_t); + } + unsafe { _svwrffr(op) } } -#[doc = "Subtract reversed"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(subr))] -pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { - svsubr_u64_z(pg, op1, svdup_n_u64(op2)) +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i1")] + fn _svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip1_b8(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8")] - fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i1")] + fn _svzip1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; } - unsafe { _svqadd_s8(op1, op2) } + unsafe { _svzip1_b16(op1.into(), op2.into()).into() } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { - svqadd_s8(op1, svdup_n_s8(op2)) +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i1")] + fn _svzip1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip1_b32(op1.into(), op2.into()).into() } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16")] - fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i1")] + fn _svzip1_b64(op1: svbool2_t, op2: 
svbool2_t) -> svbool2_t; } - unsafe { _svqadd_s16(op1, op2) } + unsafe { _svzip1_b64(op1.into(), op2.into()).into() } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { - svqadd_s16(op1, svdup_n_s16(op2)) +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4f32")] + fn _svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1_f32(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32")] - fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2f64")] + fn _svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { _svqadd_s32(op1, op2) } + unsafe { _svzip1_f64(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t { - svqadd_s32(op1, svdup_n_s32(op2)) +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i8")] + fn _svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1_s8(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(zip1))] +pub fn 
svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64")] - fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i16")] + fn _svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svqadd_s64(op1, op2) } + unsafe { _svzip1_s16(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(sqadd))] -pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { - svqadd_s64(op1, svdup_n_s64(op2)) +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i32")] + fn _svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1_s32(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8")] - fn _svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i64")] + fn _svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svqadd_u8(op1, op2) } + unsafe { _svzip1_s64(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { - svqadd_u8(op1, svdup_n_u8(op2)) +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", 
issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { - unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16")] - fn _svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t; - } - unsafe { _svqadd_u16(op1, op2) } +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { - svqadd_u16(op1, svdup_n_u16(op2)) +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Saturating add"] +#[doc = "Interleave elements from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32")] - fn _svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4f32")] + fn _svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { _svqadd_u32(op1, op2) } + unsafe { _svzip1q_f32(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave quadwords from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { - svqadd_u32(op1, svdup_n_u32(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2f64")] + fn _svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1q_f64(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave quadwords from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64")] - fn _svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv16i8")] + fn _svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svqadd_u64(op1, op2) } + unsafe { _svzip1q_s8(op1, op2) } } -#[doc = "Saturating add"] +#[doc = "Interleave quadwords from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(uqadd))] -pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { - svqadd_u64(op1, svdup_n_u64(op2)) +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv8i16")] + fn _svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1q_s16(op1, op2) } } -#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "Interleave quadwords from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(compact))] -pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv4f32")] - fn _svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4i32")] + fn _svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { _svcompact_f32(pg, op) } + unsafe { _svzip1q_s32(op1, op2) } } -#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "Interleave quadwords from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(compact))] -pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv2f64")] - fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2i64")] + fn _svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svcompact_f64(simd_cast(pg), op) } + unsafe { _svzip1q_s64(op1, op2) } } -#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "Interleave quadwords from low halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"] +#[inline] +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(compact))] -pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv4i32")] - fn _svcompact_s32(pg: svbool_t, op: 
svint32_t) -> svint32_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i1")] + fn _svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; } - unsafe { _svcompact_s32(pg, op) } + unsafe { _svzip2_b8(op1, op2) } } -#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(compact))] -pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.compact.nxv2i64")] - fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i1")] + fn _svzip2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; } - unsafe { _svcompact_s64(simd_cast(pg), op) } + unsafe { _svzip2_b16(op1.into(), op2.into()).into() } } -#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(compact))] -pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { - let op_s: svint32_t = unsafe { core::mem::transmute(op) }; - let res_s: svint32_t = svcompact_s32(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i1")] + fn _svzip2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip2_b32(op1.into(), op2.into()).into() } } -#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(compact))] -pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { - let op_s: svint64_t = unsafe { core::mem::transmute(op) }; - let res_s: svint64_t = svcompact_s64(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i1")] + fn _svzip2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip2_b64(op1.into(), op2.into()).into() } } -#[doc = 
"Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")] - fn _svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4f32")] + fn _svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { _svlasta_f32(pg, op) } + unsafe { _svzip2_f32(op1, op2) } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")] - fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2f64")] + fn _svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { _svlasta_f64(simd_cast(pg), op) } + unsafe { _svzip2_f64(op1, op2) } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")] - fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i8")] + fn _svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svlasta_s8(pg, op) } + unsafe { _svzip2_s8(op1, op2) } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")] - fn _svlasta_s16(pg: svbool_t, op: svint16_t) -> i16; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i16")] + fn _svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svlasta_s16(pg, op) } + unsafe { _svzip2_s16(op1, op2) } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")] - fn _svlasta_s32(pg: svbool_t, op: svint32_t) -> i32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i32")] + fn _svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { _svlasta_s32(pg, op) } + unsafe { _svzip2_s32(op1, op2) } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")] - fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i64")] + fn _svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svlasta_s64(simd_cast(pg), op) } + unsafe { _svzip2_s64(op1, op2) } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { - let op_s: svint8_t = unsafe { core::mem::transmute(op) }; - let res_s: i8 = svlasta_s8(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2_s8(op1.as_signed(), 
op2.as_signed()).as_unsigned() } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { - let op_s: svint16_t = unsafe { core::mem::transmute(op) }; - let res_s: i16 = svlasta_s16(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { - let op_s: svint32_t = unsafe { core::mem::transmute(op) }; - let res_s: i32 = svlasta_s32(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Extract element after last"] +#[doc = "Interleave elements from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"] #[inline] #[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lasta))] -pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { - let op_s: svint64_t = unsafe { core::mem::transmute(op) }; - let res_s: i64 = svlasta_s64(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")] - fn _svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32; + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4f32")] + fn _svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; } - unsafe { _svlastb_f32(pg, op) } + unsafe { _svzip2q_f32(op1, op2) } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")] - fn _svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2f64")] + fn _svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; } - unsafe { _svlastb_f64(simd_cast(pg), op) } + unsafe { _svzip2q_f64(op1, op2) } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")] - fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv16i8")] + fn _svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; } - unsafe { _svlastb_s8(pg, op) } + unsafe { _svzip2q_s8(op1, op2) } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")] - fn _svlastb_s16(pg: svbool_t, op: svint16_t) -> i16; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv8i16")] + fn _svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; } - unsafe { _svlastb_s16(pg, op) } + unsafe { _svzip2q_s16(op1, op2) } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of 
two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")] - fn _svlastb_s32(pg: svbool_t, op: svint32_t) -> i32; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4i32")] + fn _svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; } - unsafe { _svlastb_s32(pg, op) } + unsafe { _svzip2q_s32(op1, op2) } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { unsafe extern "C" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")] - fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64; + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2i64")] + fn _svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; } - unsafe { _svlastb_s64(simd_cast(pg), op) } + unsafe { _svzip2q_s64(op1, op2) } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { - let op_s: svint8_t = unsafe { core::mem::transmute(op) }; - let res_s: i8 = svlastb_s8(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { - let op_s: 
svint16_t = unsafe { core::mem::transmute(op) }; - let res_s: i16 = svlastb_s16(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { - let op_s: svint32_t = unsafe { core::mem::transmute(op) }; - let res_s: i32 = svlastb_s32(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } } -#[doc = "Extract last element"] +#[doc = "Interleave quadwords from high halves of two inputs"] #[doc = ""] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"] #[inline] -#[target_feature(enable = "sve")] -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[cfg_attr(test, assert_instr(lastb))] -pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { - let op_s: svint64_t = unsafe { core::mem::transmute(op) }; - let res_s: i64 = svlastb_s64(pg, op_s); - unsafe { core::mem::transmute::(res_s) } +#[target_feature(enable = "sve,f64mm")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } } diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs index a3dd5936d13a1..9c27ae2226a0b 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs @@ -722,6 +722,31 @@ impl svprfop { pub const fn as_raw(self) -> u8 { self.0 } + + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL1KEEP: svprfop = svprfop(0); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL1STRM: svprfop = svprfop(1); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL2KEEP: svprfop = svprfop(2); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL2STRM: svprfop = svprfop(3); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL3KEEP: svprfop = svprfop(4); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PLDL3STRM: svprfop = svprfop(5); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL1KEEP: svprfop = svprfop(8); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const SV_PSTL1STRM: svprfop = svprfop(9); + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] + pub const 
SV_PSTL2KEEP: svprfop = svprfop(10);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_PSTL2STRM: svprfop = svprfop(11);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_PSTL3KEEP: svprfop = svprfop(12);
+    #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+    pub const SV_PSTL3STRM: svprfop = svprfop(13);
 }
 
 // ============================================================================
@@ -920,6 +945,36 @@ impl svbool8_t {
     }
 }
 
+// ============================================================================
+// From trait impls - used by the .into() calls in the generated code
+// ============================================================================
+// Note: these impls do not use target_feature, because From impls cannot carry that attribute.
+// The conversion itself is safe and involves no actual SIMD operation.
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool_t> for svbool2_t {
+    #[inline(always)]
+    fn from(x: svbool_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool_t> for svbool4_t {
+    #[inline(always)]
+    fn from(x: svbool_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+impl From<svbool_t> for svbool8_t {
+    #[inline(always)]
+    fn from(x: svbool_t) -> Self {
+        unsafe { simd_cast(x) }
+    }
+}
+
 // ============================================================================
 // Type conversion traits - used by the generated code
 // ============================================================================
@@ -1456,3 +1511,46 @@ impl AsSigned for svint64x4_t {
 }
 
 // ============================================================================
+// LLVM type aliases - used by the generated code
+// ============================================================================
+// These type aliases map the LLVM machine representation onto Rust types in generator-emitted code
+
+// Signed integer type aliases
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv8i8 = svint8_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv4i8 = svint8_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv4i16 = svint16_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv2i8 = svint8_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv2i16 = svint16_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv2i32 = svint32_t;
+
+// Unsigned integer type aliases
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv8u8 = svuint8_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv4u8 = svuint8_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv4u16 = svuint16_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv2u8 = svuint8_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv2u16 = svuint16_t;
+
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")]
+pub type nxv2u32 = svuint32_t;
+
+// ============================================================================
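As a quick illustration of why these helpers exist (this snippet is not part of any patch in this series; it only reuses `svbool_t`, `svbool4_t`, `svuint32_t` and the `From`/`AsSigned`/`AsUnsigned` items defined in types.rs above), the generated wrappers follow a pattern along these lines:

```
// Sketch only: mirrors the shape of the generated intrinsics, not a real one.
#[target_feature(enable = "sve")]
fn example_pattern(pg: svbool_t, v: svuint32_t) -> svuint32_t {
    // Predicates are converted to the lane layout the underlying LLVM intrinsic
    // expects (e.g. nxv4i1) through the new `From<svbool_t> for svbool4_t` impl.
    let _pg4: svbool4_t = pg.into();
    // Unsigned wrappers reinterpret their operands as signed, call the signed
    // LLVM intrinsic, and reinterpret the result back; shown here as a bare
    // round-trip through the `AsSigned`/`AsUnsigned` helper traits.
    v.as_signed().as_unsigned()
}
```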
From fe5baaa522536f35293caf89413300f6c38cd2d0 Mon Sep 17 00:00:00 2001
From: wxh
Date: Mon, 24 Nov 2025 10:07:54 +0800
Subject: [PATCH 24/27] Add new macros for static assertions and extend feature list in core_arch

---
 library/stdarch/crates/core_arch/src/lib.rs    |  5 ++++-
 library/stdarch/crates/core_arch/src/macros.rs | 17 +++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs
index c58580f641780..ab3c2a697ef34 100644
--- a/library/stdarch/crates/core_arch/src/lib.rs
+++ b/library/stdarch/crates/core_arch/src/lib.rs
@@ -35,7 +35,10 @@
     x86_amx_intrinsics,
     f16,
     aarch64_unstable_target_feature,
-    bigint_helper_methods
+    bigint_helper_methods,
+    adt_const_params,
+    unchecked_shifts,
+    portable_simd
 )]
 #![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))]
 #![deny(clippy::missing_inline_in_public_items)]
diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs
index e00b43353679e..511a237b963d9 100644
--- a/library/stdarch/crates/core_arch/src/macros.rs
+++ b/library/stdarch/crates/core_arch/src/macros.rs
@@ -48,6 +48,23 @@ macro_rules! static_assert_simm_bits {
     };
 }
 
+#[allow(unused_macros)]
+macro_rules! static_assert_range {
+    ($imm:ident, $min:expr, $max:expr) => {
+        static_assert!(
+            $min <= $imm && $imm <= $max,
+            concat!(
+                stringify!($imm),
+                " is not in the range [",
+                stringify!($min),
+                ", ",
+                stringify!($max),
+                "]",
+            )
+        )
+    };
+}
+
 #[allow(unused)]
 macro_rules! types {
     (
From 6fef83b5e1ff445e58d48902bdf26ee0f67d031f Mon Sep 17 00:00:00 2001
From: wxh
Date: Mon, 24 Nov 2025 10:28:33 +0800
Subject: [PATCH 25/27] Update SVE2 intrinsics in sve2.rs with new functions and documentation;

---
 .../crates/core_arch/src/aarch64/sve/sve2.rs | 24003 +++++++++++++++-
 1 file changed, 24001 insertions(+), 2 deletions(-)

diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs
index a1a8468910288..ebe6fede8b678 100644
--- a/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs
+++ b/library/stdarch/crates/core_arch/src/aarch64/sve/sve2.rs
@@ -1,4 +1,10 @@
-// Minimal SVE2 intrinsics - for compile testing
+// This code is automatically generated. DO NOT MODIFY.
+// +// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec +// ``` #![allow(improper_ctypes)] #[cfg(test)] @@ -6,4 +12,23997 @@ use stdarch_test::assert_instr; use super::*; -// SVE2 intrinsics placeholder +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv16i8")] + fn _svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_s8(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svaba_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv8i16")] + fn _svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_s16(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svaba_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv4i32")] + fn _svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_s32(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svaba_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe 
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv2i64")] + fn _svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_s64(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svaba_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv16i8")] + fn _svaba_u8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_u8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svaba_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv8i16")] + fn _svaba_u16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svaba_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv4i32")] + fn _svaba_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u32(op1: 
svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svaba_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv2i64")] + fn _svaba_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svaba_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv8i16")] + fn _svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv4i32")] + fn _svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv2i64")] + fn _svabalb_s64(op1: 
svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv8i16")] + fn _svabalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv4i32")] + fn _svabalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv2i64")] + fn _svabalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> 
svuint64_t { + svabalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv8i16")] + fn _svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv4i32")] + fn _svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv2i64")] + fn _svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv8i16")] + fn _svabalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { 
_svabalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv4i32")] + fn _svabalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv2i64")] + fn _svabalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv8i16")] + fn _svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_s16(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv4i32")] + fn _svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_s32(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv2i64")] + fn _svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_s64(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv8i16")] + fn _svabdlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv4i32")] + fn _svabdlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv2i64")] + fn _svabdlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv8i16")] + fn _svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_s16(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv4i32")] + fn _svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_s32(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv2i64")] + fn _svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_s64(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv8i16")] + fn _svabdlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv4i32")] + fn _svabdlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv2i64")] + fn _svabdlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv8i16")] + fn _svadalp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv4i32")] + fn _svadalp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv2i64")] + fn _svadalp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv8i16")] + fn _svadalp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv4i32")] + fn _svadalp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: 
svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv2i64")] + fn _svadalp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv4i32")] + fn _svadclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv2i64")] + fn _svadclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv4i32")] + fn _svadclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv2i64")] + fn _svadclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv8i16")] + fn _svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnb_s16(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svaddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv4i32")] + fn _svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnb_s32(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svaddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (bottom)"] 
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv2i64")] + fn _svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnb_s64(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svaddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, 
op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv8i16")] + fn _svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnt_s16(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svaddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv4i32")] + fn _svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnt_s32(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svaddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv2i64")] + fn _svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnt_s64(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svaddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv8i16")] + fn _svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_s16(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv4i32")] + fn _svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_s32(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv2i64")] + fn _svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_s64(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv8i16")] + fn _svaddlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv4i32")] + fn _svaddlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv2i64")] + fn _svaddlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add long (bottom + 
top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv8i16" + )] + fn _svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlbt_s16(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv4i32" + )] + fn _svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlbt_s32(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv2i64" + )] + fn _svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlbt_s64(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv8i16")] + fn _svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_s16(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> 
svint16_t { + svaddlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv4i32")] + fn _svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_s32(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv2i64")] + fn _svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_s64(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv8i16")] + fn _svaddlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv4i32")] + fn _svaddlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(uaddlt))] +pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv2i64")] + fn _svaddlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv4f32")] + fn _svaddp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svaddp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svaddp_f32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv2f64")] + fn _svaddp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svaddp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svaddp_f64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv16i8")] + fn _svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaddp_s8_m(pg, op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svaddp_s8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv8i16")] + fn _svaddp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svaddp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svaddp_s16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv4i32")] + fn _svaddp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svaddp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svaddp_s32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv2i64")] + fn _svaddp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svaddp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svaddp_s64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svaddp_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svaddp_u8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svaddp_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svaddp_u16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svaddp_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svaddp_u32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svaddp_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svaddp_u64_m(pg, op1, op2) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv8i16")] + fn _svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_s16(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv4i32")] + fn _svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_s32(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv2i64")] + fn _svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_s64(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv8i16")] + fn _svaddwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv4i32")] + fn _svaddwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> 
svuint32_t { + svaddwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv2i64")] + fn _svaddwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv8i16")] + fn _svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_s16(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv4i32")] + fn _svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_s32(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv2i64")] + fn _svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_s64(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn 
svaddwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv8i16")] + fn _svaddwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv4i32")] + fn _svaddwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv2i64")] + fn _svaddwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "AES single round decryption"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aesd))] +pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesd")] + fn _svaesd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaesd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES single round encryption"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aese))] +pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aese")] + fn _svaese_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaese_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES inverse mix columns"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aesimc))] +pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesimc")] + fn _svaesimc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesimc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "AES mix columns"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(aesmc))] +pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesmc")] + fn _svaesmc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesmc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv16i8")] + fn _svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbcax_s8(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbcax_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv8i16")] + fn _svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbcax_s16(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbcax_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv4i32")] + fn _svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbcax_s32(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbcax_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv2i64")] + fn _svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbcax_s64(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbcax_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbcax_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbcax_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbcax_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbcax_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbcax_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbcax_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbcax_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbcax_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv16i8")] + fn _svbdep_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbdep_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbdep_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv8i16")] + fn _svbdep_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbdep_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u16(op1: 
svuint16_t, op2: u16) -> svuint16_t { + svbdep_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv4i32")] + fn _svbdep_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbdep_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbdep_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv2i64")] + fn _svbdep_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbdep_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbdep_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv16i8")] + fn _svbext_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbext_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbext_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", 
link_name = "llvm.aarch64.sve.bext.x.nxv8i16")] + fn _svbext_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbext_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbext_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv4i32")] + fn _svbext_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbext_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbext_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv2i64")] + fn _svbext_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbext_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbext_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv16i8")] + fn _svbgrp_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbgrp_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn 
svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbgrp_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv8i16")] + fn _svbgrp_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbgrp_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbgrp_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv4i32")] + fn _svbgrp_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbgrp_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbgrp_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv2i64")] + fn _svbgrp_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbgrp_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbgrp_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name 
= "llvm.aarch64.sve.bsl1n.nxv16i8")] + fn _svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl1n_s8(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl1n_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv8i16")] + fn _svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl1n_s16(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl1n_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv4i32")] + fn _svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl1n_s32(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl1n_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv2i64")] + fn _svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl1n_s64(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl1n_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select 
with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl1n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl1n_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl1n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl1n_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl1n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl1n_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl1n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl1n_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv16i8")] + fn _svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl2n_s8(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl2n_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv8i16")] + fn _svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl2n_s16(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl2n_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv4i32")] + fn _svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl2n_s32(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl2n_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv2i64")] + fn _svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl2n_s64(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl2n_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl2n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl2n_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl2n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl2n_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl2n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl2n_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl2n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl2n_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv16i8")] + fn _svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv8i16")] + fn _svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv4i32")] + fn _svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv2i64")] + fn _svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv16i8")] + fn _svcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv8i16")] + fn _svcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv4i32")] + fn _svcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv2i64")] + fn _svcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u8<const IMM_ROTATION: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u16<const IMM_ROTATION: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u32<const IMM_ROTATION: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u64<const IMM_ROTATION: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcdot_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cdot.lane.nxv4i32" + )] + fn _svcdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcdot_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcdot_lane_s64<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cdot.lane.nxv2i64" + )] + fn _svcdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcdot_lane_s64(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] +pub fn svcdot_s32<const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv4i32")] + fn _svcdot_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcdot_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"] +#[inline]
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] +pub fn svcdot_s64<const IMM_ROTATION: i32>( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv2i64")] + fn _svcdot_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcdot_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv8i16" + )] + fn _svcmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv4i32" + )] + fn _svcmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcmla_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_u16<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s16::<IMM_INDEX, IMM_ROTATION>( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_u32<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 ||
IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s32::<IMM_INDEX, IMM_ROTATION>( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv16i8")] + fn _svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svcmla_s8(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s16<const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv8i16")] + fn _svcmla_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_s16(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s32<const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv4i32")] + fn _svcmla_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcmla_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s64<const IMM_ROTATION: i32>( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv2i64")] + fn _svcmla_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcmla_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u8<const IMM_ROTATION: i32>( + op1: svuint8_t, + op2: svuint8_t, + op3: svuint8_t, +) -> svuint8_t { +
static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s8::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Up convert long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtlt.f64f32")] + fn _svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) + -> svfloat64_t; + } + unsafe { _svcvtlt_f64_f32_m(inactive, pg.into(), op) } +} +#[doc = "Up convert long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvtlt_f64_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtnt.f32f64")] + fn _svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> 
svfloat32_t; + } + unsafe { _svcvtnt_f32_f64_m(even, pg.into(), op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtnt_f32_f64_m(even, pg, op) +} +#[doc = "Down convert, rounding to odd"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtx.f32f64")] + fn _svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtx_f32_f64_m(inactive, pg.into(), op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvtx_f32_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtx_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtxnt.f32f64")] + fn _svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtxnt_f32_f64_m(even, pg.into(), op) } +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtxnt_f32_f64_m(even, pg, op) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv16i8")] + fn _sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _sveor3_s8(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three 
vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + sveor3_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv8i16")] + fn _sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _sveor3_s16(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + sveor3_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv4i32")] + fn _sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _sveor3_s32(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + sveor3_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv2i64")] + fn _sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _sveor3_s64(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + sveor3_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn 
sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { sveor3_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + sveor3_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { sveor3_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + sveor3_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { sveor3_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + sveor3_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { sveor3_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + sveor3_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv16i8")] + fn 
_sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveorbt_s8(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveorbt_s8(odd, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv8i16")] + fn _sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveorbt_s16(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveorbt_s16(odd, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv4i32")] + fn _sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveorbt_s32(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveorbt_s32(odd, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv2i64")] + fn _sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveorbt_s64(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveorbt_s64(odd, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveorbt_s8(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveorbt_u8(odd, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveorbt_s16(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveorbt_u16(odd, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveorbt_s32(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveorbt_u32(odd, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveorbt_s64(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveorbt_u64(odd, op1, svdup_n_u64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv16i8")] + fn _sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveortb_s8(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveortb_s8(even, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv8i16")] + fn _sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveortb_s16(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveortb_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv4i32")] + fn _sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveortb_s32(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveortb_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv2i64")] + fn _sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveortb_s64(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveortb_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveortb_s8(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveortb_u8(even, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveortb_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveortb_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveortb_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveortb_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveortb_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, 
op2: u64) -> svuint64_t { + sveortb_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv16i8")] + fn _svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_s8_m(pg, op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv8i16")] + fn _svhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"] 
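+// Predication suffixes follow the pattern used throughout this file: `_m` calls the
+// merging LLVM intrinsic directly, `_x` ("don't care") simply reuses `_m`, and `_z`
+// zeroes the inactive lanes of `op1` via `svsel` before calling `_m`.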
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv4i32")] + fn _svhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv2i64")] + fn _svhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv16i8")] + fn _svhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
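+// The unsigned halving adds lower to `uhadd`; they are built on the signed FFI
+// declarations by round-tripping through `as_signed`/`as_unsigned`, with `_n_`
+// forms splatting the scalar operand via `svdup_n_*`.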
+#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv8i16")] + fn _svhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv4i32")] + fn _svhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv2i64")] + fn _svhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: 
svint64_t) -> svint64_t; + } + unsafe { _svhadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv4i32" + )] + fn _svhistcnt_s32_z(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhistcnt_s32_z(pg.into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv2i64" + )] + fn _svhistcnt_s64_z(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhistcnt_s64_z(pg.into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, 
op2: svuint32_t) -> svuint32_t { + unsafe { svhistcnt_s32_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svhistcnt_s64_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histseg.nxv16i8" + )] + fn _svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhistseg_s8(op1, op2).as_unsigned() } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svhistseg_s8(op1.as_signed(), op2.as_signed()) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv16i8")] + fn _svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), 
op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv8i16")] + fn _svhsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_s16_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv4i32")] + fn _svhsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_s32_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv2i64")] + fn _svhsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_s64_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] 
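+// Note: the FFI declarations take the predicate as `svbool2_t`/`svbool4_t`/`svbool8_t`
+// to match the element width, so the public wrappers convert from `svbool_t` with
+// `pg.into()` before calling them.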
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv16i8")] + fn _svhsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe 
extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv8i16")] + fn _svhsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv4i32")] + fn _svhsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, op1, op2) +} +#[doc = "Halving 
subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv2i64")] + fn _svhsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn 
svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv16i8")] + fn _svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv8i16")] + fn _svhsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_s16_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_m(pg, 
op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv4i32")] + fn _svhsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv2i64")] + fn _svhsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.uhsubr.nxv16i8")] + fn _svhsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv8i16")] + fn _svhsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract 
reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv4i32")] + fn _svhsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv2i64")] + fn _svhsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2f64" + )] + fn _svldnt1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64index_f64(pg.into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i64" + )] + fn _svldnt1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64index_s64(pg.into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.ldnt1.gather.nxv2f64" + )] + fn _svldnt1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64offset_f64(pg.into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i64" + )] + fn _svldnt1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64offset_s64(pg.into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32" + )] + fn _svldnt1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldnt1_gather_u32offset_f32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32" + )] + fn _svldnt1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldnt1_gather_u32offset_s32(pg.into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: 
svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_s32)"] +#[doc = ""] 
+#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_f32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + 
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldnt1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldnt1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_f64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack 
provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldnt1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldnt1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> 
svuint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast(_svldnt1sb_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldnt1sh_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe 
fn svldnt1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldnt1sw_gather_s64offset_s64(pg.into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + 
svldnt1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast(_svldnt1sb_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast(_svldnt1sh_gather_u32offset_s32( + pg.into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast(_svldnt1sb_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) 
-> nxv4i16; + } + simd_cast(_svldnt1sh_gather_u32base_offset_s32( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast(_svldnt1sb_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast(_svldnt1sh_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + 
simd_cast(_svldnt1sw_gather_u64base_offset_s64( + pg.into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for 
some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required 
for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast(_svldnt1sh_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast(_svldnt1sw_gather_s64index_s64(pg.into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> 
svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + simd_cast::( + _svldnt1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svldnt1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> 
svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svldnt1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldnt1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldnt1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + simd_cast::( + _svldnt1ub_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + simd_cast::( + _svldnt1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + simd_cast::( + _svldnt1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + simd_cast::( + _svldnt1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` 
lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + simd_cast::( + _svldnt1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + simd_cast::( + _svldnt1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + simd_cast::( + _svldnt1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering 
rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + simd_cast::( + _svldnt1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + simd_cast::( + _svldnt1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " 
* This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_s32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_u32)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be 
required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_s64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_u64)"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv4f32")] + fn _svlogb_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svlogb_f32_m(inactive, pg.into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svlogb_f32_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svlogb_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv2f64")] + fn _svlogb_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svlogb_f64_m(inactive, pg.into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svlogb_f64_m(simd_reinterpret(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svlogb_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv16i8")] + fn _svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svmatch_s8(pg, op1, op2) } +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv8i16")] + fn _svmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svmatch_s16(pg.into(), op1, op2).into() } +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect any matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv4f32" + )] + fn _svmaxnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnmp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnmp_f32_m(pg, op1, op2) +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv2f64" + )] + fn _svmaxnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnmp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnmp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv4f32")] + fn _svmaxp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxp_f32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv2f64")] + fn _svmaxp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv16i8")] + fn _svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_s8_m(pg, op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmaxp_s8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv8i16")] + fn _svmaxp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> 
svint16_t; + } + unsafe { _svmaxp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmaxp_s16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv4i32")] + fn _svmaxp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmaxp_s32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv2i64")] + fn _svmaxp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmaxp_s64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv16i8")] + fn _svmaxp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmaxp_u8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_m(pg: 
svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv8i16")] + fn _svmaxp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmaxp_u16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv4i32")] + fn _svmaxp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmaxp_u32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv2i64")] + fn _svmaxp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmaxp_u64_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv4f32" + )] + fn _svminnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnmp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] 
+pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnmp_f32_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv2f64" + )] + fn _svminnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnmp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnmp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv4f32")] + fn _svminp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminp_f32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminp_f32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv2f64")] + fn _svminp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminp_f64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv16i8")] + fn _svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } 
+ unsafe { _svminp_s8_m(pg, op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svminp_s8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv8i16")] + fn _svminp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_s16_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svminp_s16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv4i32")] + fn _svminp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_s32_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svminp_s32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv2i64")] + fn _svminp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_s64_m(pg.into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svminp_s64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + 
unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv16i8")] + fn _svminp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svminp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svminp_u8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv8i16")] + fn _svminp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svminp_u16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv4i32")] + fn _svminp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svminp_u32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv2i64")] + fn _svminp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + 
svminp_u64_m(pg, op1, op2)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_s16<const IMM_INDEX: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mla.lane.nxv8i16"
+        )]
+        fn _svmla_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svmla_lane_s16(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mla.lane.nxv4i32"
+        )]
+        fn _svmla_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmla_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mla.lane.nxv2i64"
+        )]
+        fn _svmla_lane_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmla_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_u16<const IMM_INDEX: i32>(
+    op1: svuint16_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe {
+        svmla_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe {
+        svmla_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint64_t,
+    op3: svuint64_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe {
+        svmla_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalb.lane.nxv4i32"
+        )]
+        fn _svmlalb_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmlalb_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalb.lane.nxv2i64"
+        )]
+        fn _svmlalb_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmlalb_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalb.lane.nxv4i32"
+        )]
+        fn _svmlalb_lane_u32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svmlalb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalb.lane.nxv2i64"
+        )]
+        fn _svmlalb_lane_u64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svmlalb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb))]
+pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch =
"aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv8i16")] + fn _svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_s16(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv4i32")] + fn _svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv2i64")] + fn _svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalb_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv8i16")] + fn _svmlalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long 
(bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv4i32")]
+        fn _svmlalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svmlalb_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv2i64")]
+        fn _svmlalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svmlalb_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalt.lane.nxv4i32"
+        )]
+        fn _svmlalt_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmlalt_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalt.lane.nxv2i64"
+        )]
+        fn _svmlalt_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmlalt_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalt.lane.nxv4i32"
+        )]
+        fn _svmlalt_lane_u32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svmlalt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalt.lane.nxv2i64"
+        )]
+        fn _svmlalt_lane_u64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svmlalt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv8i16")]
+        fn _svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlalt_s16(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svmlalt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv4i32")]
+        fn _svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlalt_s32(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svmlalt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv2i64")] + fn _svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv8i16")] + fn _svmlalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv4i32")] + fn _svmlalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv2i64")] + fn _svmlalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} 
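+// Usage sketch (illustrative; `bytes_a` and `bytes_b` stand in for arbitrary
+// `svuint8_t` inputs): combining the bottom/top variants accumulates the
+// products of every 8-bit lane into widened 16-bit lanes.
+//
+//     let mut acc = svdup_n_u16(0);
+//     acc = svmlalb_u16(acc, bytes_a, bytes_b); // even (bottom) u8 lanes
+//     acc = svmlalt_u16(acc, bytes_a, bytes_b); // odd (top) u8 lanes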
+#[doc = "Multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv8i16" + )] + fn _svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svmls_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv4i32" + )] + fn _svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmls_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv2i64" + )] + fn _svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmls_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe { + svmls_lane_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { + svmls_lane_s32::(op1.as_signed(), op2.as_signed(), 
op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { + svmls_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), 
IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv8i16")] + fn _svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv4i32")] + fn _svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv2i64")] + fn _svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv8i16")] + fn _svmlslb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_u16(op1.as_signed(), 
op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
+    svmlslb_u16(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv4i32")]
+        fn _svmlslb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlslb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svmlslb_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv2i64")]
+        fn _svmlslb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlslb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svmlslb_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlslt.lane.nxv4i32"
+        )]
+        fn _svmlslt_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmlslt_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlslt.lane.nxv2i64"
+        )]
+        fn _svmlslt_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmlslt_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlslt.lane.nxv4i32"
+        )]
+        fn _svmlslt_lane_u32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svmlslt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlslt.lane.nxv2i64"
+        )]
+        fn _svmlslt_lane_u64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svmlslt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv8i16")]
+        fn _svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlslt_s16(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svmlslt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv4i32")]
+        fn _svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlslt_s32(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long 
(top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv2i64")] + fn _svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv8i16")] + fn _svmlslt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv4i32")] + fn _svmlslt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv2i64")] + fn _svmlslt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s16(op: svint8_t) -> svint16_t { + svshllb_n_s16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s32(op: svint16_t) -> svint32_t { + svshllb_n_s32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s64(op: svint32_t) -> svint64_t { + svshllb_n_s64::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t { + svshllb_n_u16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t { + svshllb_n_u32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t { + svshllb_n_u64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s16(op: svint8_t) -> svint16_t { + svshllt_n_s16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s32(op: svint16_t) -> svint32_t { + svshllt_n_s32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s64(op: svint32_t) -> svint64_t { + svshllt_n_s64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t { + svshllt_n_u16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t { + svshllt_n_u32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t { + svshllt_n_u64::<0>(op) +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv4f32" + )] + fn _svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svmul_lane_f32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv2f64" + )] + fn _svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svmul_lane_f64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv8i16" + )] + fn _svmul_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svmul_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe 
extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv4i32" + )] + fn _svmul_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmul_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv2i64" + )] + fn _svmul_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmul_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe { svmul_lane_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe { svmul_lane_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe { svmul_lane_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv4i32" + )] + fn _svmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv2i64" + )] + fn _svmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = 
"Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv4i32" + )] + fn _svmullb_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv2i64" + )] + fn _svmullb_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv8i16")] + fn _svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_s16(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv4i32")] + fn _svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_s32(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.smullb.nxv2i64")] + fn _svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_s64(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv8i16")] + fn _svmullb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv4i32")] + fn _svmullb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv2i64")] + fn _svmullb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv4i32" + )] + fn _svmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv2i64" + )] + fn _svmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv4i32" + )] + fn _svmullt_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv2i64" + )] + fn _svmullt_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")] + fn _svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_s16(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")] + fn _svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_s32(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")] + fn _svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_s64(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")] + fn _svmullt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")] + fn _svmullt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t 
{ + svmullt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")] + fn _svmullt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv16i8")] + fn _svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svnbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svnbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv8i16")] + fn _svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svnbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svnbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv4i32")] + fn _svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svnbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svnbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv2i64")] + fn _svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svnbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svnbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svnbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svnbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svnbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svnbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svnbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svnbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svnbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svnbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv16i8")] + fn _svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svnmatch_s8(pg, op1, op2) } +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv8i16")] + fn _svnmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svnmatch_s16(pg.into(), op1, op2).into() } +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svnmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect no matching elements"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svnmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Polynomial multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pmul.nxv16i8")] + fn _svpmul_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmul_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn 
svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmul_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv16i8" + )] + fn _svpmullb_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullb_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullb_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv4i32" + )] + fn _svpmullb_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullb_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullb_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv2i64" + )] + fn _svpmullb_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullb_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullb_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { 
simd_reinterpret(svpmullb_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { simd_reinterpret(svpmullb_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv16i8" + )] + fn _svpmullt_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullt_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullt_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv4i32" + )] + fn _svpmullt_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullt_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullt_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, 
assert_instr(pmullt))] +pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv2i64" + )] + fn _svpmullt_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullt_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullt_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { simd_reinterpret(svpmullt_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { simd_reinterpret(svpmullt_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv16i8")] + fn _svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqabs_s8_m(inactive, pg, op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn 
svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv8i16")] + fn _svqabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqabs_s16_m(inactive, pg.into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv4i32")] + fn _svqabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqabs_s32_m(inactive, pg.into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv2i64")] + fn _svqabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqabs_s64_m(inactive, pg.into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv16i8")] + fn _svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8_m(pg, op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv8i16")] + fn _svqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv4i32")] + fn _svqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv2i64")] + fn _svqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = 
"aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv16i8")] + fn _svqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv8i16")] + fn _svqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv4i32")] + fn _svqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + 
svqadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv2i64")] + fn _svqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv16i8" + )] + fn _svqcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svqcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> 
svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv8i16" + )] + fn _svqcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svqcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv4i32" + )] + fn _svqcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svqcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv2i64" + )] + fn _svqcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svqcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv4i32" + )] + fn _svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv2i64" + )] + fn _svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + 
unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv8i16" + )] + fn _svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv4i32" + )] + fn _svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv2i64" + )] + fn _svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv8i16" + )] + fn _svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv4i32" + )] + fn _svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv2i64" + )] + fn _svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv4i32" + )] + fn _svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint32_t, 
+ op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv2i64" + )] + fn _svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv8i16" + )] + fn _svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv4i32" + )] + fn _svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv2i64" + )] + fn _svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv4i32" + )] + fn _svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv2i64" + )] + fn _svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv8i16" + )] + fn _svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv4i32" + )] + fn _svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling 
multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv2i64" + )] + fn _svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv8i16" + )] + fn _svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv4i32" + )] + fn _svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + 
#[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv2i64" + )] + fn _svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv4i32" + )] + fn _svqdmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv2i64" + )] + fn _svqdmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv8i16" + )] + fn _svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) 
-> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv4i32" + )] + fn _svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv2i64" + )] + fn _svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv8i16" + )] + fn _svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svqdmulh_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv4i32" + )] + fn _svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmulh_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqdmulh.lane.nxv2i64" + )] + fn _svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmulh_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv16i8" + )] + fn _svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqdmulh_s8(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqdmulh_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv8i16" + )] + fn _svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqdmulh_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqdmulh_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv4i32" + )] + fn _svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqdmulh_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqdmulh_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv2i64" + )] + fn _svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqdmulh_s64(op1, 
op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqdmulh_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv4i32" + )] + fn _svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv2i64" + )] + fn _svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv8i16" + )] + fn _svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullb_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv4i32" + )] + fn _svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullb_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv2i64" + )] + fn _svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullb_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv4i32" + )] + fn _svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv2i64" + )] + fn _svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv8i16" + )] + fn _svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullt_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv4i32" + )] + fn _svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullt_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv2i64" + )] + fn _svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullt_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv16i8")] + fn _svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqneg_s8_m(inactive, pg, op) } +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv8i16")] 
+ fn _svqneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqneg_s16_m(inactive, pg.into(), op) } +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv4i32")] + fn _svqneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqneg_s32_m(inactive, pg.into(), op) } +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv2i64")] + fn _svqneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqneg_s64_m(inactive, pg.into(), op) } +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqneg_s64_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqneg_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svqrdcmlah_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv8i16" + )] + fn _svqrdcmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svqrdcmlah_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svqrdcmlah_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv4i32" + )] + fn _svqrdcmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svqrdcmlah_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s8<const IMM_ROTATION: i32>( + op1: svint8_t, + op2: svint8_t, + op3: svint8_t, +) -> svint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv16i8" + )] + fn _svqrdcmlah_s8( + op1: svint8_t, + op2: svint8_t, + op3: svint8_t, + imm_rotation: i32, + ) -> svint8_t; + } + unsafe { _svqrdcmlah_s8(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s16<const IMM_ROTATION: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv8i16" + )] + fn _svqrdcmlah_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svqrdcmlah_s16(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = ""] +#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s32<const IMM_ROTATION: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv4i32" + )] + fn _svqrdcmlah_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svqrdcmlah_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s64<const IMM_ROTATION: i32>( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv2i64" + )] + fn _svqrdcmlah_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svqrdcmlah_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s16<const IMM_INDEX: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv8i16" + )] + fn _svqrdmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svqrdmlah_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv4i32" + )] + fn _svqrdmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqrdmlah_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( +
target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv2i64" + )] + fn _svqrdmlah_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqrdmlah_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv16i8" + )] + fn _svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svqrdmlah_s8(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svqrdmlah_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv8i16" + )] + fn _svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svqrdmlah_s16(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svqrdmlah_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv4i32" + )] + fn _svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svqrdmlah_s32(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svqrdmlah_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv2i64" + )] + fn _svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svqrdmlah_s64(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svqrdmlah_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s16<const IMM_INDEX: i32>( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16" + )] + fn _svqrdmlsh_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svqrdmlsh_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s32<const IMM_INDEX: i32>( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32" + )] + fn _svqrdmlsh_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqrdmlsh_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s64<const IMM_INDEX: i32>( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64" + )] + fn _svqrdmlsh_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqrdmlsh_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name =
"llvm.aarch64.sve.sqrdmlsh.nxv16i8" + )] + fn _svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svqrdmlsh_s8(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svqrdmlsh_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv8i16" + )] + fn _svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svqrdmlsh_s16(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svqrdmlsh_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv4i32" + )] + fn _svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svqrdmlsh_s32(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svqrdmlsh_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv2i64" + )] + fn _svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svqrdmlsh_s64(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svqrdmlsh_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] +pub fn svqrdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0, 7); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv8i16" + )] + fn _svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svqrdmulh_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] +pub fn svqrdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0, 3); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv4i32" + )] + fn _svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqrdmulh_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] +pub fn svqrdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0, 1); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv2i64" + )] + fn _svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqrdmulh_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv16i8" + )] + fn _svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrdmulh_s8(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqrdmulh_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C"
{ + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv8i16" + )] + fn _svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrdmulh_s16(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqrdmulh_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv4i32" + )] + fn _svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrdmulh_s32(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqrdmulh_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv2i64" + )] + fn _svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrdmulh_s64(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqrdmulh_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv16i8")] + fn _svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = 
""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqrshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv8i16")] + fn _svqrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrshl_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqrshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn 
svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv4i32")] + fn _svqrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrshl_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqrshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.sqrshl.nxv2i64")] + fn _svqrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrshl_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv16i8")] + fn _svqrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqrshl_u8_m(pg, op1, op2) +} +#[doc = "Saturating rounding 
shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv8i16")] + fn _svqrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqrshl_u16_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv4i32")] + fn _svqrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv2i64")] + fn _svqrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_s16(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_s32(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe 
extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_s64(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s16(op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv8i16" + )] + fn _svqrshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunb_n_s16(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s32(op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv4i32" + )] + fn _svqrshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunb_n_s32(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s64(op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv2i64" + )] + fn _svqrshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunb_n_s64(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv8i16" + )] + fn _svqrshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv4i32" + )] + fn _svqrshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv2i64" + )] + fn _svqrshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.sqshl.nxv16i8")] + fn _svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv8i16")] + fn _svqshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv4i32")] + fn _svqshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv2i64")] + fn _svqshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv16i8")] + fn _svqshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqshl_u8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv8i16")] + fn _svqshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqshl_u16_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqshl_u16_m(pg, 
svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv4i32")] + fn _svqshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqshl_u32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv2i64")] + fn _svqshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_u64_m(pg.into(), op1.as_signed(), 
op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqshl_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    static_assert_range!(IMM2, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv16i8")]
+        fn _svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshlu_n_s8_m(pg, op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0, 15);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv8i16")]
+        fn _svqshlu_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshlu_n_s16_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv4i32")]
+        fn _svqshlu_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshlu_n_s32_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    svqshlu_n_s32_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    svqshlu_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0, 63);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv2i64")]
+        fn _svqshlu_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svqshlu_n_s64_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
"sve,sve2")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_x(pg: svbool_t, op1: svint64_t) -> svuint64_t { + svqshlu_n_s64_m::(pg, op1) +} +#[doc = "Saturating shift left unsigned"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_z(pg: svbool_t, op1: svint64_t) -> svuint64_t { + svqshlu_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv8i16" + )] + fn _svqshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnb_n_s16(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv4i32" + )] + fn _svqshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnb_n_s32(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv2i64" + )] + fn _svqshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnb_n_s64(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv8i16" + )] + fn _svqshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv4i32" + )] + fn _svqshrnb_n_u32(op1: svint32_t, 
imm2: i32) -> svint16_t; + } + unsafe { _svqshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv2i64" + )] + fn _svqshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv8i16" + )] + fn _svqshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv4i32" + )] + fn _svqshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv2i64" + )] + fn _svqshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnt.nxv8i16" + )] + fn _svqshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnt.nxv4i32" + )] + fn _svqshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnt.nxv2i64" + )] + fn _svqshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] +pub fn svqshrunb_n_s16(op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunb.nxv8i16" + )] + fn _svqshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrunb_n_s16(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] +pub fn svqshrunb_n_s32(op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunb.nxv4i32" + )] + fn _svqshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrunb_n_s32(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] +pub fn svqshrunb_n_s64(op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunb.nxv2i64" + )] + fn _svqshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrunb_n_s64(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] +pub fn svqshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.sqshrunt.nxv8i16" + )] + fn _svqshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] +pub fn svqshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunt.nxv4i32" + )] + fn _svqshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] +pub fn svqshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunt.nxv2i64" + )] + fn _svqshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv16i8")] + fn _svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_s8_m(pg, op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsub_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> 
svint8_t { + svqsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv8i16")] + fn _svqsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv4i32")] + fn _svqsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv2i64")] + fn _svqsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_x(pg, op1, svdup_n_s64(op2)) 
+} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv16i8")] + fn _svqsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_m(pg: svbool_t, 
op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv8i16")] + fn _svqsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv4i32")] + fn _svqsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, 
op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv2i64")] + fn _svqsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv16i8")] + fn _svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_s8_m(pg, op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv8i16")] + fn _svqsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_s16_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv4i32")] + fn _svqsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_s32_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] 
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv2i64")] + fn _svqsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_s64_m(pg.into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(uqsubr))] +pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv16i8")] + fn _svqsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv8i16")] + fn _svqsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv4i32")] + fn _svqsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> 
svuint32_t { + svqsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv2i64")] + fn _svqsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s16(op: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv8i16")] + fn _svqxtnb_s16(op: svint16_t) -> 
svint8_t; + } + unsafe { _svqxtnb_s16(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s32(op: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv4i32")] + fn _svqxtnb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_s32(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s64(op: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv2i64")] + fn _svqxtnb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_s64(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv8i16")] + fn _svqxtnb_u16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv4i32")] + fn _svqxtnb_u32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv2i64")] + fn _svqxtnb_u64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv8i16")] + fn _svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_s16(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> 
svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv4i32")] + fn _svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_s32(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv2i64")] + fn _svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_s64(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv8i16")] + fn _svqxtnt_u16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_u16(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv4i32")] + fn _svqxtnt_u32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_u32(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv2i64")] + fn _svqxtnt_u64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_u64(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv8i16" + )] + fn _svqxtunb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunb_s16(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv4i32" + )] + fn 
_svqxtunb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunb_s32(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv2i64" + )] + fn _svqxtunb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunb_s64(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv8i16" + )] + fn _svqxtunt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunt_s16(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv4i32" + )] + fn _svqxtunt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunt_s32(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv2i64" + )] + fn _svqxtunt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunt_s64(even.as_signed(), op).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv8i16" + )] + fn _svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnb_s16(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svraddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv4i32" + )] + fn _svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnb_s32(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svraddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv2i64" + )] + fn _svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnb_s64(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svraddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv8i16" + )] + fn _svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnt_s16(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svraddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv4i32" + )] + fn _svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnt_s32(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svraddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv2i64" + )] + fn _svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnt_s64(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"] 
+#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svraddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rax1")] + fn _svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrax1_s64(op1, op2) } +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"] 
+#[inline] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svrax1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urecpe.nxv4i32")] + fn _svrecpe_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrecpe_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(op, pg, op) +} +#[doc = "Reciprocal estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv16i8")] + fn _svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_s8_m(pg, op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn 
svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv8i16")] + fn _svrhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_s16_m(pg.into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv4i32")] + fn _svrhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + 
} + unsafe { _svrhadd_s32_m(pg.into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv2i64")] + fn _svrhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_s64_m(pg.into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
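+// Per Arm's documentation, a rounding halving add computes roughly
+// `(op1 + op2 + 1) >> 1` for each active element, with enough intermediate
+// precision that the sum cannot overflow; e.g. for `u8` operands 254 and 255
+// the result element is 255 rather than a wrapped value.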
+#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv16i8")] + fn _svrhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv8i16")] + fn _svrhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv4i32")] + fn _svrhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] 
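+// The unsigned forms bind the dedicated `urhadd` LLVM intrinsic; the
+// `as_signed`/`as_unsigned` calls only reconcile the Rust-level element types
+// with the extern declaration's signed types and should not change the
+// underlying bits.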
+#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv2i64")] + fn _svrhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] 
+pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv16i8")] + fn _svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_s8_m(pg, op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv8i16")] + fn _svrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_s16_m(pg.into(), op1, op2) } +} +#[doc = 
"Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv4i32")] + fn _svrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_s32_m(pg.into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrshl_s32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { 
+ svrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv2i64")] + fn _svrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrshl_s64_m(pg.into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(urshl))] +pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv16i8")] + fn _svrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv8i16")] + fn _svrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> 
svuint16_t { + svrshl_u16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv4i32")] + fn _svrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv2i64")] + fn _svrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svrshl_u64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_m(pg: svbool_t, op1: svint8_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv16i8")] + fn _svrshr_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshr_n_s8_m(pg, op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 
= 1))] +pub fn svrshr_n_s8_x(pg: svbool_t, op1: svint8_t) -> svint8_t { + svrshr_n_s8_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_z(pg: svbool_t, op1: svint8_t) -> svint8_t { + svrshr_n_s8_m::(pg, svsel_s8(pg, op1, svdup_n_s8(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_m(pg: svbool_t, op1: svint16_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv8i16")] + fn _svrshr_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshr_n_s16_m(pg.into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_x(pg: svbool_t, op1: svint16_t) -> svint16_t { + svrshr_n_s16_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_z(pg: svbool_t, op1: svint16_t) -> svint16_t { + svrshr_n_s16_m::(pg, svsel_s16(pg, op1, svdup_n_s16(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv4i32")] + fn _svrshr_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshr_n_s32_m(pg.into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_x(pg: svbool_t, op1: svint32_t) -> svint32_t { + svrshr_n_s32_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_t { + svrshr_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_m(pg: svbool_t, op1: 
svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv2i64")] + fn _svrshr_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svrshr_n_s64_m(pg.into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_t { + svrshr_n_s64_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_z(pg: svbool_t, op1: svint64_t) -> svint64_t { + svrshr_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_m(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv16i8")] + fn _svrshr_n_u8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshr_n_u8_m(pg, op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_x(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + svrshr_n_u8_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_z(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + svrshr_n_u8_m::(pg, svsel_u8(pg, op1, svdup_n_u8(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_m(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv8i16")] + fn _svrshr_n_u16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshr_n_u16_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_x(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + svrshr_n_u16_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_z(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + svrshr_n_u16_m::(pg, svsel_u16(pg, op1, svdup_n_u16(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_m(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv4i32")] + fn _svrshr_n_u32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshr_n_u32_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_x(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + svrshr_n_u32_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_z(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + svrshr_n_u32_m::(pg, svsel_u32(pg, op1, svdup_n_u32(0))) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_m(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + static_assert_range!(IMM2, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv2i64")] + fn _svrshr_n_u64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svrshr_n_u64_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_x(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + svrshr_n_u64_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_z(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + svrshr_n_u64_m::(pg, svsel_u64(pg, op1, svdup_n_u64(0))) +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + 
#[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv8i16")] + fn _svrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshrnb_n_s16(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv4i32")] + fn _svrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshrnb_n_s32(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv2i64")] + fn _svrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshrnb_n_s64(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe { svrshrnb_n_s16::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe { svrshrnb_n_s32::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe { svrshrnb_n_s64::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv8i16")] + fn _svrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
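+// Per the SVE2 narrowing convention, the `rshrnb` ("bottom") forms write the
+// rounded, narrowed results to the even-numbered elements and zero the odd
+// ones, while the `rshrnt` ("top") forms write the odd-numbered elements and
+// copy the even ones from the `even` argument.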
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv4i32")] + fn _svrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv2i64")] + fn _svrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1, 8); + unsafe { svrshrnt_n_s16::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1, 16); + unsafe { svrshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1, 32); + unsafe { svrshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ursqrte.nxv4i32" + )] + fn _svrsqrte_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrsqrte_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(op, pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv16i8")] + fn _svrsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv8i16")] + fn _svrsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv4i32")] + fn _svrsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv2i64")] + fn _svrsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_s64(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv16i8")] + fn _svrsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv8i16")] + fn _svrsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv4i32")] + fn _svrsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv2i64")] + fn _svrsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv8i16" + )] + fn _svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnb_s16(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv4i32" + )] + fn _svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnb_s32(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv2i64" + )] + fn _svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnb_s64(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv8i16" + )] + fn _svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnt_s16(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv4i32" + )] + fn _svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnt_s32(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv2i64" + )] + fn _svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnt_s64(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn 
svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv4i32")] + fn _svsbclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"] 
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclb))]
+pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv2i64")]
+        fn _svsbclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsbclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract with borrow long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclb))]
+pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svsbclb_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv4i32")]
+        fn _svsbclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsbclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svsbclt_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv2i64")]
+        fn _svsbclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsbclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svsbclt_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
+pub fn svshllb_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv8i16")]
+        fn _svshllb_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllb_n_s16(op1, IMM2) }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
+pub fn svshllb_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv4i32")]
+        fn _svshllb_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllb_n_s32(op1, IMM2) }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
+pub fn svshllb_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv2i64")]
+        fn _svshllb_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllb_n_s64(op1, IMM2) }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
+pub fn svshllb_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv8i16")]
+        fn _svshllb_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
+pub fn svshllb_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv4i32")]
+        fn _svshllb_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
+pub fn svshllb_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv2i64")]
+        fn _svshllb_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv8i16")]
+        fn _svshllt_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllt_n_s16(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv4i32")]
+        fn _svshllt_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllt_n_s32(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv2i64")]
+        fn _svshllt_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllt_n_s64(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv8i16")]
+        fn _svshllt_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllt_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv4i32")]
+        fn _svshllt_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllt_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv2i64")]
+        fn _svshllt_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllt_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv8i16")]
+        fn _svshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv4i32")]
+        fn _svshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv2i64")]
+        fn _svshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe { svshrnb_n_s32::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe { svshrnb_n_s64::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv8i16")]
+        fn _svshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svshrnt_n_s16(even, op1, IMM2) }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv4i32")]
+        fn _svshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshrnt_n_s32(even, op1, IMM2) }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv2i64")]
+        fn _svshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svshrnt_n_s16::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe { svshrnt_n_s32::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe { svshrnt_n_s64::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 0, 7);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv16i8")]
+        fn _svsli_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsli_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 0, 15);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv8i16")]
+        fn _svsli_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsli_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv4i32")]
+        fn _svsli_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsli_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv2i64")]
+        fn _svsli_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsli_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 0, 7);
+    unsafe { svsli_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 0, 15);
+    unsafe { svsli_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe { svsli_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe { svsli_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "SM4 encryption and decryption"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-sm4")]
+#[cfg_attr(test, assert_instr(sm4e))]
+pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4e")]
+        fn _svsm4e_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsm4e_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "SM4 key updates"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-sm4")]
+#[cfg_attr(test, assert_instr(sm4ekey))]
+pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) ->
svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4ekey")] + fn _svsm4ekey_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4ekey_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv16i8")] + fn _svsqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsqadd_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv8i16")] + fn _svsqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsqadd_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv4i32")] + fn _svsqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsqadd_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"] +#[inline] 
+#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv2i64")] + fn _svsqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsqadd_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn 
svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svsqadd_u64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv16i8")]
+        fn _svsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsra_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv8i16")]
+        fn _svsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsra_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv4i32")]
+        fn _svsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsra_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv2i64")]
+        fn _svsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsra_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv16i8")]
+        fn _svsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv8i16")]
+        fn _svsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv4i32")]
+        fn _svsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv2i64")]
+        fn _svsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv16i8")]
+        fn _svsri_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsri_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv8i16")]
+        fn _svsri_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsri_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv4i32")]
+        fn _svsri_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsri_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    unsafe extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv2i64")]
+        fn _svsri_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsri_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    unsafe { svsri_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    unsafe { svsri_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    unsafe { svsri_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    unsafe { svsri_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_s64index_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    indices: svint64_t,
+    data: svfloat64_t,
+) {
+    unsafe extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2f64"
+        )]
+        fn _svstnt1_scatter_s64index_f64(
+            data: svfloat64_t,
+            pg: svbool2_t,
+            base: *mut f64,
+            indices: svint64_t,
+        );
+    }
+    _svstnt1_scatter_s64index_f64(data, pg.into(), base, indices)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i64" + )] + fn _svstnt1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_s64(data, pg.into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] 
+#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2f64" + )] + fn _svstnt1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_f64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i64" + )] + fn _svstnt1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_s64(data, pg.into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32" + )] + fn _svstnt1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_f32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32" + )] + fn _svstnt1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_s32(data, pg.into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svstnt1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svstnt1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svstnt1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack 
provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svstnt1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + 
_svstnt1_scatter_u32base_offset_f32(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_s32(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal 
accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_f64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_s64(data, pg.into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} 
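As a rough mental model of what the `_u64base_offset` scatter stores above do per lane, the addressing and predication rules from the safety docs can be written out in plain scalar Rust. The helper below is a hypothetical sketch for illustration only; it is not part of this patch or of stdarch, and it does not capture the non-temporal memory-ordering behaviour or the run-time lane count of a real scalable vector.

/// Scalar model of the predicated scatter stores above (e.g.
/// `svstnt1_scatter_u64base_offset_u64`): each *active* lane `i` writes
/// `data[i]` to the address `bases[i] + offset`; inactive lanes are skipped.
/// `scatter_store_model` is a hypothetical name used only for this sketch.
unsafe fn scatter_store_model(pg: &[bool], bases: &[u64], offset: i64, data: &[u64]) {
    for i in 0..data.len() {
        if pg[i] {
            // `bases` holds plain addresses without provenance, so this mirrors
            // the "usize as ptr" cast described in the safety docs; the caller
            // must ensure every computed address is valid to write through.
            let addr = bases[i].wrapping_add(offset as u64) as usize as *mut u64;
            unsafe { addr.write(data[i]) };
        }
    }
}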
+#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i8" + )] + fn _svstnt1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svstnt1b_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i16" + )] + fn _svstnt1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svstnt1h_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_s64( + pg: svbool_t, + base: 
*mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i32" + )] + fn _svstnt1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svstnt1w_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, 
base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8" + )] + fn _svstnt1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svstnt1b_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16" + )] + fn _svstnt1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svstnt1h_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
+#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svstnt1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1b_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn 
_svstnt1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1h_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svstnt1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1b_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svstnt1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1h_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svstnt1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1w_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering 
rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using 
it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on 
each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i16" + )] + fn _svstnt1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svstnt1h_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i32" + )] + fn _svstnt1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svstnt1w_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory 
ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64index_u64( + pg: svbool_t, 
+ base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as 
ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv8i16")] + fn _svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnb_s16(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv4i32")] + fn _svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnb_s32(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv2i64")] + fn _svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnb_s64(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv8i16")] + fn _svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnt_s16(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(subhnt))] +pub fn svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv4i32")] + fn _svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnt_s32(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv2i64")] + fn _svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnt_s64(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv8i16")] + fn _svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_s16(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv4i32")] + fn _svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_s32(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv2i64")] + fn _svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_s64(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv8i16")] + fn _svsublb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv4i32")] + fn _svsublb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv2i64")] + fn _svsublb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv8i16" + )] + fn _svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublbt_s16(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
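// A minimal usage sketch (assumed caller, with hypothetical variable names) showing how
// the `_n_` convenience wrappers above expand: the scalar operand is splatted with the
// matching `svdup_n_*` call and the vector form is invoked, and the unsigned variants
// reuse the signed LLVM intrinsic through `as_signed()`/`as_unsigned()` bit-casts:
//
//     // assuming a function compiled with #[target_feature(enable = "sve,sve2")]
//     // and `bytes: svuint8_t` already in scope
//     let widened = svsublb_n_u16(bytes, 3);
//     // behaves the same as:
//     let widened = svsublb_u16(bytes, svdup_n_u8(3));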
+#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv4i32" + )] + fn _svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublbt_s32(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv2i64" + )] + fn _svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublbt_s64(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv8i16")] + fn _svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_s16(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv4i32")] + fn _svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_s32(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv2i64")] + fn _svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_s64(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv8i16")] + fn _svsublt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv4i32")] + fn _svsublt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv2i64")] + fn _svsublt_u64(op1: svint32_t, op2: svint32_t) -> 
svint64_t; + } + unsafe { _svsublt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv8i16" + )] + fn _svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubltb_s16(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsubltb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv4i32" + )] + fn _svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubltb_s32(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsubltb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv2i64" + )] + fn _svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubltb_s64(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsubltb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> 
svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv8i16")] + fn _svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_s16(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv4i32")] + fn _svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_s32(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv2i64")] + fn _svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_s64(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv8i16")] + fn _svsubwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv4i32")] + fn _svsubwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv2i64")] + fn _svsubwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv8i16")] + fn _svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_s16(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv4i32")] + fn _svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_s32(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv2i64")] + fn _svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_s64(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv8i16")] + fn _svsubwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv4i32")] + fn _svsubwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv2i64")] + fn _svsubwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn 
svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4f32")] + fn _svtbl2_f32(data0: svfloat32_t, data1: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { + _svtbl2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2f64")] + fn _svtbl2_f64(data0: svfloat64_t, data1: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { + _svtbl2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv16i8")] + fn _svtbl2_s8(data0: svint8_t, data1: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { + _svtbl2_s8( + svget2_s8::<0>(data), + svget2_s8::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv8i16")] + fn _svtbl2_s16(data0: svint16_t, data1: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { + _svtbl2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4i32")] + fn _svtbl2_s32(data0: svint32_t, data1: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { + _svtbl2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] 
+#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2i64")] + fn _svtbl2_s64(data0: svint64_t, data1: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { + _svtbl2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbl2_s8(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbl2_s16(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbl2_s32(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbl2_s64(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4f32")] + fn _svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbx_f32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2f64")] + fn _svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { _svtbx_f64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"] +#[inline] +#[target_feature(enable = 
"sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv16i8")] + fn _svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { _svtbx_s8(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv8i16")] + fn _svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { _svtbx_s16(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4i32")] + fn _svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svtbx_s32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2i64")] + fn _svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svtbx_s64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbx_s8(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbx_s16(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> 
svuint32_t { + unsafe { svtbx_s32(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbx_s64(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(punpkhi))] +pub fn svunpkhi_b(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.punpkhi.nxv16i1" + )] + fn _svunpkhi_b(op: svbool_t) -> svbool8_t; + } + unsafe { _svunpkhi_b(op).into() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s16(op: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv8i16" + )] + fn _svunpkhi_s16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpkhi_s16(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s32(op: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv4i32" + )] + fn _svunpkhi_s32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpkhi_s32(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s64(op: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv2i64" + )] + fn _svunpkhi_s64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpkhi_s64(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv8i16" + )] + fn _svunpkhi_u16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpkhi_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + 
link_name = "llvm.aarch64.sve.uunpkhi.nxv4i32" + )] + fn _svunpkhi_u32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpkhi_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv2i64" + )] + fn _svunpkhi_u64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpkhi_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(punpklo))] +pub fn svunpklo_b(op: svbool_t) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.punpklo.nxv16i1" + )] + fn _svunpklo_b(op: svbool_t) -> svbool8_t; + } + unsafe { _svunpklo_b(op).into() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s16(op: svint8_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv8i16" + )] + fn _svunpklo_s16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpklo_s16(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s32(op: svint16_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv4i32" + )] + fn _svunpklo_s32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpklo_s32(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s64(op: svint32_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv2i64" + )] + fn _svunpklo_s64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpklo_s64(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv8i16" + )] + fn _svunpklo_u16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpklo_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, 
assert_instr(uunpklo))] +pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv4i32" + )] + fn _svunpklo_u32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpklo_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv2i64" + )] + fn _svunpklo_u64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpklo_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv16i8")] + fn _svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuqadd_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svuqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svuqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv8i16")] + fn _svuqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuqadd_s16_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svuqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svuqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv4i32")] + fn _svuqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuqadd_s32_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_m(pg, op1, svdup_n_u32(op2)) +} 
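For reference, a minimal usage sketch of the predicated `svuqadd` wrappers defined in this file. The helper below is hypothetical and not part of the patch; it assumes the caller is itself compiled with the `sve2` target feature and uses only the `_m`/`_z` forms declared here.

#[target_feature(enable = "sve,sve2")]
fn bias_active_lanes(pg: svbool_t, v: svint32_t, bias: u32) -> (svint32_t, svint32_t) {
    // Sketch only: saturating-add an unsigned bias to the lanes selected by `pg`.
    let merged = svuqadd_n_s32_m(pg, v, bias); // inactive lanes keep their value from `v`
    let zeroed = svuqadd_n_s32_z(pg, v, bias); // inactive lanes come out as zero
    (merged, zeroed)
}

The `_x` variants make no guarantee about inactive lanes; as their definitions in this file show, they simply forward to the corresponding `_m` form.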
+#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svuqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svuqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv2i64")] + fn _svuqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuqadd_s64_m(pg.into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svuqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svuqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv16i1.i32" + )] + fn _svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilege_b8_s32(op1, op2) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv8i1.i32" + )] + fn _svwhilege_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilege_b16_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv4i1.i32" + )] + fn _svwhilege_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilege_b32_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv2i1.i32" + )] + fn _svwhilege_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilege_b64_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv16i1.i64" 
+ )] + fn _svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilege_b8_s64(op1, op2) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv8i1.i64" + )] + fn _svwhilege_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilege_b16_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv4i1.i64" + )] + fn _svwhilege_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilege_b32_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv2i1.i64" + )] + fn _svwhilege_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilege_b64_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i32" + )] + fn _svwhilege_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilege_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i32" + )] + fn _svwhilege_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilege_b16_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.whilehs.nxv4i1.i32" + )] + fn _svwhilege_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilege_b32_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i32" + )] + fn _svwhilege_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilege_b64_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i64" + )] + fn _svwhilege_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilege_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i64" + )] + fn _svwhilege_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilege_b16_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i64" + )] + fn _svwhilege_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilege_b32_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i64" + )] + fn _svwhilege_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilege_b64_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b8_s32(op1: i32, 
op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i32" + )] + fn _svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilegt_b8_s32(op1, op2) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i32" + )] + fn _svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilegt_b16_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i32" + )] + fn _svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilegt_b32_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i32" + )] + fn _svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilegt_b64_s32(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i64" + )] + fn _svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_s64(op1, op2) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i64" + )] + fn _svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.whilegt.nxv4i1.i64" + )] + fn _svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i64" + )] + fn _svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_s64(op1, op2).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i32" + )] + fn _svwhilegt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilegt_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i32" + )] + fn _svwhilegt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i32" + )] + fn _svwhilegt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i32" + )] + fn _svwhilegt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u32(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = 
"llvm.aarch64.sve.whilehi.nxv16i1.i64" + )] + fn _svwhilegt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i64" + )] + fn _svwhilegt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i64" + )] + fn _svwhilegt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i64" + )] + fn _svwhilegt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).into() } +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.b.nxv16i1.p0" + )] + fn _svwhilerw_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilerw_8ptr(op1, op2) +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.h.nxv8i1.p0" + )] + fn _svwhilerw_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilerw_16ptr(op1, op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.s.nxv4i1.p0" + )] + fn _svwhilerw_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilerw_32ptr(op1, op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilerw_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let 
op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.d.nxv2i1.p0" + )] + fn _svwhilerw_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilerw_64ptr(op1, op2).into() +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) 
safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.b.nxv16i1.p0" + )] + fn _svwhilewr_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilewr_8ptr(op1, op2) +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.h.nxv8i1.p0" + )] + fn _svwhilewr_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilewr_16ptr(op1, 
op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.s.nxv4i1.p0" + )] + fn _svwhilewr_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilewr_32ptr(op1, op2).into() +} +#[inline] +#[target_feature(enable = "sve,sve2")] +unsafe fn svwhilewr_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.d.nxv2i1.p0" + )] + fn _svwhilewr_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilewr_64ptr(op1, op2).into() +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * 
[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"] +#[doc = ""] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s8(op1: 
svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv16i8")] + fn _svxar_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svxar_n_s8(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv8i16")] + fn _svxar_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svxar_n_s16(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")] + fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svxar_n_s32(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe extern "C" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv2i64")] + fn _svxar_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svxar_n_s64(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1, 8); + unsafe { svxar_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1, 16); + unsafe { svxar_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1, 32); + unsafe { svxar_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and 
rotate right"] +#[doc = ""] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"] +#[inline] +#[target_feature(enable = "sve,sve2")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1, 64); + unsafe { svxar_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} From 27d3aee2629165f5fffedb722dfe026129e50eba Mon Sep 17 00:00:00 2001 From: wxh Date: Mon, 24 Nov 2025 11:41:58 +0800 Subject: [PATCH 26/27] Refactor SVE module: Enhance type conversion utilities and implement static dispatch for SVE select operations. Update documentation for clarity and maintainability. --- .../crates/core_arch/src/aarch64/sve/mod.rs | 399 ++++++--- .../crates/core_arch/src/aarch64/sve/types.rs | 773 ++++++++++++------ 2 files changed, 791 insertions(+), 381 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs index 9ae6aeb29769b..63d1a71ef7d74 100755 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -1,69 +1,85 @@ #![allow(unused_unsafe)] +// ============================================================================ +// Module Declarations +// ============================================================================ + mod sve; mod sve2; + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub mod types; -// ================================ -// 修复点 1/2:去掉 simd_*,改为位级转换 -// ================================ +use types::*; + +// ============================================================================ +// Type Conversion Utilities +// ============================================================================ + +/// Bit-level reinterpretation for SVE types. +/// +/// This function performs a pure bit-level reinterpretation. SVE wrapper types +/// are treated as opaque at this level to avoid triggering E0511 errors. #[inline] #[target_feature(enable = "sve")] pub(crate) unsafe fn simd_reinterpret(x: T) -> U { - // 纯位级重解释;SVE 封装类型在这层视为opaque,避免走 simd_cast 触发 E0511 core::mem::transmute_copy::(&x) } +/// Type casting for SVE types. +/// +/// Most SVE "casts" in stdarch are just layout-identical reinterpretations. +/// For actual value-semantic conversions, use the corresponding LLVM SVE convert +/// intrinsics in the specific API implementations. #[inline] #[target_feature(enable = "sve")] pub(crate) unsafe fn simd_cast(x: T) -> U { - // 多数 SVE "cast"在 stdarch 内部只是布局相同的重解释;按位转即可 - // 如需数值语义转换,请在具体 API 内对接相应 LLVM SVE convert 内建。 core::mem::transmute_copy::(&x) } -// ================================ -// 修复点 3/3:逐类型绑定 LLVM SVE `sel` 内建,替代 simd_select -// 说明:SVE 的“按谓词选择”在 LLVM 里是 aarch64.sve.sel.* 内建, -// 名字与元素类型/宽度对应,如:nxv16i8/nxv8i16/nxv4i32/nxv2i64、nxv4f32/nxv2f64。 -// 这是最稳妥的做法,避免把非SIMD类型喂给 simd_select 触发 E0511。 -// ================================ -use types::*; +// ============================================================================ +// SVE Select Operation (Predicated Selection) +// ============================================================================ +// +// SVE's predicated selection uses LLVM's aarch64.sve.sel.* intrinsics. 
+// The intrinsic names correspond to element types/widths: +// - nxv16i8, nxv8i16, nxv4i32, nxv2i64 (integers) +// - nxv4f32, nxv2f64 (floats) +// - nxv16i1 (predicates) +// +// This approach avoids feeding non-SIMD types to simd_select, which would +// trigger E0511 errors. -// 用 trait 把选择操作"静态分派"到对应的 LLVM SVE sel 内建上 +/// Trait for static dispatch of SVE select operations to LLVM intrinsics. pub(crate) trait __SveSelect { unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self; } -// 声明 LLVM 内建函数(每个唯一的后缀只声明一次) -// 使用泛型函数指针类型,避免重复声明 +// LLVM intrinsic declarations for SVE select operations unsafe extern "C" { #[link_name = "llvm.aarch64.sve.sel.nxv16i8"] fn __llvm_sve_sel_nxv16i8(mask: svbool_t, a: svint8_t, b: svint8_t) -> svint8_t; - + #[link_name = "llvm.aarch64.sve.sel.nxv8i16"] fn __llvm_sve_sel_nxv8i16(mask: svbool_t, a: svint16_t, b: svint16_t) -> svint16_t; - + #[link_name = "llvm.aarch64.sve.sel.nxv4i32"] fn __llvm_sve_sel_nxv4i32(mask: svbool_t, a: svint32_t, b: svint32_t) -> svint32_t; - + #[link_name = "llvm.aarch64.sve.sel.nxv2i64"] fn __llvm_sve_sel_nxv2i64(mask: svbool_t, a: svint64_t, b: svint64_t) -> svint64_t; - + #[link_name = "llvm.aarch64.sve.sel.nxv4f32"] fn __llvm_sve_sel_nxv4f32(mask: svbool_t, a: svfloat32_t, b: svfloat32_t) -> svfloat32_t; - + #[link_name = "llvm.aarch64.sve.sel.nxv2f64"] fn __llvm_sve_sel_nxv2f64(mask: svbool_t, a: svfloat64_t, b: svfloat64_t) -> svfloat64_t; - + #[link_name = "llvm.aarch64.sve.sel.nxv16i1"] fn __llvm_sve_sel_nxv16i1(mask: svbool_t, a: svbool_t, b: svbool_t) -> svbool_t; } -// 为每个类型实现 trait,调用对应的 LLVM 内建函数 -// 注意:svint8_t 和 svuint8_t 共享同一个 LLVM 内建函数(都是 nxv16i8) -// 由于它们在 LLVM 层面是相同的类型,可以直接使用 transmute 进行类型转换 +// Implementation for signed integer types impl __SveSelect for svint8_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { @@ -71,56 +87,75 @@ impl __SveSelect for svint8_t { } } -impl __SveSelect for svuint8_t { +impl __SveSelect for svint16_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { - // svuint8_t 和 svint8_t 在 LLVM 层面是相同的类型(都是 nxv16i8) - core::mem::transmute(__llvm_sve_sel_nxv16i8(mask, core::mem::transmute(a), core::mem::transmute(b))) + __llvm_sve_sel_nxv8i16(mask, a, b) } } -impl __SveSelect for svint16_t { +impl __SveSelect for svint32_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { - __llvm_sve_sel_nxv8i16(mask, a, b) + __llvm_sve_sel_nxv4i32(mask, a, b) } } -impl __SveSelect for svuint16_t { +impl __SveSelect for svint64_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { - core::mem::transmute(__llvm_sve_sel_nxv8i16(mask, core::mem::transmute(a), core::mem::transmute(b))) + __llvm_sve_sel_nxv2i64(mask, a, b) } } -impl __SveSelect for svint32_t { +// Implementation for unsigned integer types +// Note: svuint*_t and svint*_t share the same LLVM intrinsic at the same width +// since they have identical layouts in LLVM. 
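// A minimal sketch, not taken from the patch: the hand-written unsigned impls
// below could equivalently be generated by a small macro, since each one is the
// same "reinterpret as the signed layout, call the shared LLVM intrinsic,
// reinterpret back" pattern. The macro name and its invocation are illustrative
// assumptions only.
macro_rules! impl_sve_select_via_transmute {
    ($sv_ty:ty, $llvm_fn:ident) => {
        impl __SveSelect for $sv_ty {
            #[inline(always)]
            unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self {
                // Reinterpret the unsigned vectors as their signed
                // counterparts, select, then reinterpret the result back.
                core::mem::transmute($llvm_fn(
                    mask,
                    core::mem::transmute(a),
                    core::mem::transmute(b),
                ))
            }
        }
    };
}
// e.g. `impl_sve_select_via_transmute!(svuint8_t, __llvm_sve_sel_nxv16i8);`
// would expand to the svuint8_t impl written out explicitly below.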
+impl __SveSelect for svuint8_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { - __llvm_sve_sel_nxv4i32(mask, a, b) + core::mem::transmute(__llvm_sve_sel_nxv16i8( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) } } -impl __SveSelect for svuint32_t { +impl __SveSelect for svuint16_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { - core::mem::transmute(__llvm_sve_sel_nxv4i32(mask, core::mem::transmute(a), core::mem::transmute(b))) + core::mem::transmute(__llvm_sve_sel_nxv8i16( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) } } -impl __SveSelect for svint64_t { +impl __SveSelect for svuint32_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { - __llvm_sve_sel_nxv2i64(mask, a, b) + core::mem::transmute(__llvm_sve_sel_nxv4i32( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) } } impl __SveSelect for svuint64_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { - core::mem::transmute(__llvm_sve_sel_nxv2i64(mask, core::mem::transmute(a), core::mem::transmute(b))) + core::mem::transmute(__llvm_sve_sel_nxv2i64( + mask, + core::mem::transmute(a), + core::mem::transmute(b), + )) } } +// Implementation for floating-point types impl __SveSelect for svfloat32_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { @@ -135,7 +170,7 @@ impl __SveSelect for svfloat64_t { } } -// svbool_t 是 1 位谓词向量,对应 nxv16i1 +// Implementation for predicate type (1-bit predicate vector, nxv16i1) impl __SveSelect for svbool_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { @@ -143,18 +178,22 @@ impl __SveSelect for svbool_t { } } -// 如果你在 types.rs 支持了 f16 / bf16 / mfloat8,可按需解开/补齐: -// impl_sve_select!("nxv8f16", svfloat16_t); -// impl_sve_select!("nxv8bf16", svbfloat16_t); -// impl_sve_select!("nxv16f8", svmfloat8_t); +// TODO: If f16/bf16/mfloat8 are supported in types.rs, add implementations: +// impl __SveSelect for svfloat16_t { ... } +// impl __SveSelect for svbfloat16_t { ... } +// impl __SveSelect for svmfloat8_t { ... } + +// ============================================================================ +// Predicate Type Conversions +// ============================================================================ +// +// These implementations use transmute_copy for bit-level conversion. +// No target feature is required since transmute_copy is a pure bit-level +// operation that doesn't involve SVE instructions. -// 实现从不同宽度的谓词类型到 svbool_t 的转换 -// 注意:这些实现直接使用 transmute_copy,不需要 target feature -// 因为 transmute_copy 是纯位级转换,不涉及 SVE 指令 impl From for svbool_t { #[inline(always)] fn from(x: svbool2_t) -> Self { - // 使用 transmute_copy 进行位级转换,不需要 target feature unsafe { core::mem::transmute_copy(&x) } } } @@ -173,13 +212,18 @@ impl From for svbool_t { } } -// 公开的"选择"总入口:保持原函数签名不变(被 sve/*.rs 调用) -// 现在它不再走 simd_select,而是经 trait 静态分派到 LLVM SVE `sel` +// ============================================================================ +// Public Select API +// ============================================================================ +// +// This is the public entry point for select operations, maintaining the +// original function signature (called by sve/*.rs). It now uses trait-based +// static dispatch to LLVM SVE `sel` intrinsics instead of simd_select. 
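// A minimal caller-side sketch of the entry point defined just below; the
// function name `example_select_s32` is illustrative and not an intrinsic from
// this patch. It assumes the `sve` target feature and the svbool_t / svint32_t
// wrappers declared in types.rs.
#[inline]
#[target_feature(enable = "sve")]
unsafe fn example_select_s32(pg: svbool_t, a: svint32_t, b: svint32_t) -> svint32_t {
    // Dispatches statically through the __SveSelect trait to
    // llvm.aarch64.sve.sel.nxv4i32; no generic simd_select platform intrinsic
    // (and hence no E0511) is involved.
    simd_select(pg, a, b)
}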
+ #[inline] #[target_feature(enable = "sve")] pub(crate) unsafe fn simd_select(m: M, a: T, b: T) -> T where - // SVE 谓词统一为 svbool_t;避免出现 svbool4_t/svbool8_t 这类"假类型" M: Into, T: __SveSelect, { @@ -187,7 +231,11 @@ where ::sel(mask, a, b) } -// -------- 下面保持你原有的标量转换 Trait 实现不变 -------- +// ============================================================================ +// Scalar Type Conversion Traits +// ============================================================================ + +/// Trait for converting between signed and unsigned scalar types. trait ScalarConversion: Sized { type Unsigned; type Signed; @@ -195,127 +243,226 @@ trait ScalarConversion: Sized { fn as_signed(self) -> Self::Signed; } +// Signed integer implementations impl ScalarConversion for i8 { type Unsigned = u8; type Signed = i8; + #[inline(always)] - fn as_unsigned(self) -> u8 { self as u8 } - #[inline(always)] - fn as_signed(self) -> i8 { self } -} -impl ScalarConversion for u8 { - type Unsigned = u8; - type Signed = i8; - #[inline(always)] - fn as_unsigned(self) -> u8 { self } + fn as_unsigned(self) -> u8 { + self as u8 + } + #[inline(always)] - fn as_signed(self) -> i8 { self as i8 } + fn as_signed(self) -> i8 { + self + } } + impl ScalarConversion for i16 { type Unsigned = u16; type Signed = i16; + #[inline(always)] - fn as_unsigned(self) -> u16 { self as u16 } - #[inline(always)] - fn as_signed(self) -> i16 { self } -} -impl ScalarConversion for u16 { - type Unsigned = u16; - type Signed = i16; - #[inline(always)] - fn as_unsigned(self) -> u16 { self } + fn as_unsigned(self) -> u16 { + self as u16 + } + #[inline(always)] - fn as_signed(self) -> i16 { self as i16 } + fn as_signed(self) -> i16 { + self + } } + impl ScalarConversion for i32 { type Unsigned = u32; type Signed = i32; + #[inline(always)] - fn as_unsigned(self) -> u32 { self as u32 } - #[inline(always)] - fn as_signed(self) -> i32 { self } -} -impl ScalarConversion for u32 { - type Unsigned = u32; - type Signed = i32; - #[inline(always)] - fn as_unsigned(self) -> u32 { self } + fn as_unsigned(self) -> u32 { + self as u32 + } + #[inline(always)] - fn as_signed(self) -> i32 { self as i32 } + fn as_signed(self) -> i32 { + self + } } + impl ScalarConversion for i64 { type Unsigned = u64; type Signed = i64; + + #[inline(always)] + fn as_unsigned(self) -> u64 { + self as u64 + } + + #[inline(always)] + fn as_signed(self) -> i64 { + self + } +} + +// Unsigned integer implementations +impl ScalarConversion for u8 { + type Unsigned = u8; + type Signed = i8; + + #[inline(always)] + fn as_unsigned(self) -> u8 { + self + } + + #[inline(always)] + fn as_signed(self) -> i8 { + self as i8 + } +} + +impl ScalarConversion for u16 { + type Unsigned = u16; + type Signed = i16; + #[inline(always)] - fn as_unsigned(self) -> u64 { self as u64 } + fn as_unsigned(self) -> u16 { + self + } + #[inline(always)] - fn as_signed(self) -> i64 { self } + fn as_signed(self) -> i16 { + self as i16 + } } + +impl ScalarConversion for u32 { + type Unsigned = u32; + type Signed = i32; + + #[inline(always)] + fn as_unsigned(self) -> u32 { + self + } + + #[inline(always)] + fn as_signed(self) -> i32 { + self as i32 + } +} + impl ScalarConversion for u64 { type Unsigned = u64; type Signed = i64; + #[inline(always)] - fn as_unsigned(self) -> u64 { self } + fn as_unsigned(self) -> u64 { + self + } + #[inline(always)] - fn as_signed(self) -> i64 { self as i64 } + fn as_signed(self) -> i64 { + self as i64 + } } -// 指针类型实现 +// 
============================================================================ +// Pointer Type Conversions +// ============================================================================ + macro_rules! impl_scalar_conversion_for_ptr { - ($(($unsigned:ty, $signed:ty)),*) => {$( - impl ScalarConversion for *const $unsigned { - type Unsigned = *const $unsigned; - type Signed = *const $signed; - #[inline(always)] - fn as_unsigned(self) -> *const $unsigned { self } - #[inline(always)] - fn as_signed(self) -> *const $signed { self as *const $signed } - } - impl ScalarConversion for *const $signed { - type Unsigned = *const $unsigned; - type Signed = *const $signed; - #[inline(always)] - fn as_unsigned(self) -> *const $unsigned { self as *const $unsigned } - #[inline(always)] - fn as_signed(self) -> *const $signed { self } - } - impl ScalarConversion for *mut $unsigned { - type Unsigned = *mut $unsigned; - type Signed = *mut $signed; - #[inline(always)] - fn as_unsigned(self) -> *mut $unsigned { self } - #[inline(always)] - fn as_signed(self) -> *mut $signed { self as *mut $signed } - } - impl ScalarConversion for *mut $signed { - type Unsigned = *mut $unsigned; - type Signed = *mut $signed; - #[inline(always)] - fn as_unsigned(self) -> *mut $unsigned { self as *mut $unsigned } - #[inline(always)] - fn as_signed(self) -> *mut $signed { self } - } - )*}; + ($(($unsigned:ty, $signed:ty)),*) => { + $( + impl ScalarConversion for *const $unsigned { + type Unsigned = *const $unsigned; + type Signed = *const $signed; + + #[inline(always)] + fn as_unsigned(self) -> *const $unsigned { + self + } + + #[inline(always)] + fn as_signed(self) -> *const $signed { + self as *const $signed + } + } + + impl ScalarConversion for *const $signed { + type Unsigned = *const $unsigned; + type Signed = *const $signed; + + #[inline(always)] + fn as_unsigned(self) -> *const $unsigned { + self as *const $unsigned + } + + #[inline(always)] + fn as_signed(self) -> *const $signed { + self + } + } + + impl ScalarConversion for *mut $unsigned { + type Unsigned = *mut $unsigned; + type Signed = *mut $signed; + + #[inline(always)] + fn as_unsigned(self) -> *mut $unsigned { + self + } + + #[inline(always)] + fn as_signed(self) -> *mut $signed { + self as *mut $signed + } + } + + impl ScalarConversion for *mut $signed { + type Unsigned = *mut $unsigned; + type Signed = *mut $signed; + + #[inline(always)] + fn as_unsigned(self) -> *mut $unsigned { + self as *mut $unsigned + } + + #[inline(always)] + fn as_signed(self) -> *mut $signed { + self + } + } + )* + }; } + impl_scalar_conversion_for_ptr!((u8, i8), (u16, i16), (u32, i32), (u64, i64)); -// 维持对外导出 +// ============================================================================ +// Public Exports +// ============================================================================ + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub use sve::*; + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub use sve2::*; + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub use types::*; -// a) 外部内建 +// ============================================================================ +// LLVM Intrinsics and Public APIs +// ============================================================================ + unsafe extern "C" { #[link_name = "llvm.aarch64.sve.whilelt"] fn __llvm_sve_whilelt_i32(i: i32, n: i32) -> svbool_t; } -// b) 对外 API -// 注意:svcntw() 函数在 sve.rs 中定义,使用正确的 LLVM 内建函数签名 - +/// Generate a predicate for while less-than comparison. 
+/// +/// Note: The svcntw() function is defined in sve.rs with the correct +/// LLVM intrinsic function signature. #[inline] #[target_feature(enable = "sve")] pub unsafe fn svwhilelt_b32(i: i32, n: i32) -> svbool_t { diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs index 9c27ae2226a0b..d6f754c1f783c 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs @@ -1,13 +1,16 @@ #![allow(non_camel_case_types)] -// 导入父模块中的 simd_cast 函数 +// ============================================================================ +// Imports +// ============================================================================ + use super::simd_cast; // ============================================================================ -// 核心SVE类型定义 - 最小化版本用于编译测试 +// SVE Predicate Types // ============================================================================ -/// SVE谓词类型 +/// SVE predicate type (1-bit predicate vector). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(1)] #[repr(C)] @@ -15,12 +18,15 @@ pub struct svbool_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svbool_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svbool_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE双宽度谓词类型 +/// SVE double-width predicate type (2-bit predicate vector). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(2)] #[repr(C)] @@ -28,12 +34,15 @@ pub struct svbool2_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svbool2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svbool2_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE四宽度谓词类型 +/// SVE quad-width predicate type (4-bit predicate vector). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(4)] #[repr(C)] @@ -41,12 +50,15 @@ pub struct svbool4_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svbool4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svbool4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE八宽度谓词类型 +/// SVE octuple-width predicate type (8-bit predicate vector). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -54,16 +66,19 @@ pub struct svbool8_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svbool8_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svbool8_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } // ============================================================================ -// SVE 向量类型定义 +// SVE Vector Types - Signed Integers // ============================================================================ -/// SVE 8位有符号整数向量 +/// SVE 8-bit signed integer vector. 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(16)] #[repr(C)] @@ -71,12 +86,15 @@ pub struct svint8_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint8_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint8_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位有符号整数向量 +/// SVE 16-bit signed integer vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -84,12 +102,15 @@ pub struct svint16_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint16_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint16_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 32位有符号整数向量 +/// SVE 32-bit signed integer vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(4)] #[repr(C)] @@ -97,12 +118,15 @@ pub struct svint32_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint32_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint32_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 64位有符号整数向量 +/// SVE 64-bit signed integer vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(2)] #[repr(C)] @@ -110,12 +134,19 @@ pub struct svint64_t(i64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint64_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint64_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 8位无符号整数向量 +// ============================================================================ +// SVE Vector Types - Unsigned Integers +// ============================================================================ + +/// SVE 8-bit unsigned integer vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(16)] #[repr(C)] @@ -123,12 +154,15 @@ pub struct svuint8_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint8_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint8_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位无符号整数向量 +/// SVE 16-bit unsigned integer vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -136,12 +170,15 @@ pub struct svuint16_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint16_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint16_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 32位无符号整数向量 +/// SVE 32-bit unsigned integer vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(4)] #[repr(C)] @@ -149,12 +186,15 @@ pub struct svuint32_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint32_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint32_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 64位无符号整数向量 +/// SVE 64-bit unsigned integer vector. 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(2)] #[repr(C)] @@ -162,12 +202,19 @@ pub struct svuint64_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint64_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint64_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 32位浮点向量 +// ============================================================================ +// SVE Vector Types - Floating Point +// ============================================================================ + +/// SVE 32-bit floating-point vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(4)] #[repr(C)] @@ -175,12 +222,15 @@ pub struct svfloat32_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svfloat32_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svfloat32_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 64位浮点向量 +/// SVE 64-bit floating-point vector. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(2)] #[repr(C)] @@ -188,12 +238,15 @@ pub struct svfloat64_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svfloat64_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svfloat64_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位浮点向量 (使用 f32 作为底层类型) +/// SVE 16-bit floating-point vector (uses f32 as underlying type). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -201,16 +254,19 @@ pub struct svfloat16_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svfloat16_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svfloat16_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } // ============================================================================ -// SVE 向量元组类型定义 +// SVE Vector Tuple Types - x2 (Double Vectors) // ============================================================================ -/// SVE 8位有符号整数双向量 (x2) +/// SVE 8-bit signed integer double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(32)] #[repr(C)] @@ -218,12 +274,15 @@ pub struct svint8x2_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint8x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint8x2_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 8位无符号整数双向量 (x2) +/// SVE 8-bit unsigned integer double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(32)] #[repr(C)] @@ -231,12 +290,15 @@ pub struct svuint8x2_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint8x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint8x2_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位有符号整数双向量 (x2) +/// SVE 16-bit signed integer double vector (x2). 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(16)] #[repr(C)] @@ -244,12 +306,15 @@ pub struct svint16x2_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint16x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint16x2_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位无符号整数双向量 (x2) +/// SVE 16-bit unsigned integer double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(16)] #[repr(C)] @@ -257,25 +322,15 @@ pub struct svuint16x2_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint16x2_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svuint16x2_t { - fn clone(&self) -> Self { *self } -} - -/// SVE 32位浮点双向量 (x2) -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[rustc_scalable_vector(8)] -#[repr(C)] -pub struct svfloat32x2_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat32x2_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat32x2_t { - fn clone(&self) -> Self { *self } +impl Clone for svuint16x2_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 32位有符号整数双向量 (x2) +/// SVE 32-bit signed integer double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -283,12 +338,15 @@ pub struct svint32x2_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint32x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint32x2_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 32位无符号整数双向量 (x2) +/// SVE 32-bit unsigned integer double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -296,51 +354,99 @@ pub struct svuint32x2_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint32x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint32x2_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 64位浮点双向量 (x2) +/// SVE 64-bit signed integer double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(4)] #[repr(C)] -pub struct svfloat64x2_t(f64); +pub struct svint64x2_t(i64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat64x2_t {} +impl Copy for svint64x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat64x2_t { - fn clone(&self) -> Self { *self } +impl Clone for svint64x2_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 64位有符号整数双向量 (x2) +/// SVE 64-bit unsigned integer double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(4)] #[repr(C)] -pub struct svint64x2_t(i64); +pub struct svuint64x2_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svint64x2_t {} +impl Copy for svuint64x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svint64x2_t { - fn clone(&self) -> Self { *self } +impl Clone for svuint64x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 32-bit floating-point double vector (x2). 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(8)] +#[repr(C)] +pub struct svfloat32x2_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x2_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 64位无符号整数双向量 (x2) +/// SVE 64-bit floating-point double vector (x2). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(4)] #[repr(C)] -pub struct svuint64x2_t(u64); +pub struct svfloat64x2_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svuint64x2_t {} +impl Copy for svfloat64x2_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svuint64x2_t { - fn clone(&self) -> Self { *self } +impl Clone for svfloat64x2_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit floating-point double vector (x2). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(16)] +#[repr(C)] +pub struct svfloat16x2_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x2_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x2_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 8位有符号整数三向量 (x3) +// ============================================================================ +// SVE Vector Tuple Types - x3 (Triple Vectors) +// ============================================================================ + +/// SVE 8-bit signed integer triple vector (x3). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(48)] #[repr(C)] @@ -348,12 +454,15 @@ pub struct svint8x3_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint8x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint8x3_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 8位无符号整数三向量 (x3) +/// SVE 8-bit unsigned integer triple vector (x3). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(48)] #[repr(C)] @@ -361,12 +470,15 @@ pub struct svuint8x3_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint8x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint8x3_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位有符号整数三向量 (x3) +/// SVE 16-bit signed integer triple vector (x3). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(24)] #[repr(C)] @@ -374,12 +486,15 @@ pub struct svint16x3_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint16x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint16x3_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位无符号整数三向量 (x3) +/// SVE 16-bit unsigned integer triple vector (x3). 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(24)] #[repr(C)] @@ -387,25 +502,15 @@ pub struct svuint16x3_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint16x3_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svuint16x3_t { - fn clone(&self) -> Self { *self } -} -/// SVE 32位浮点三向量 (x3) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[rustc_scalable_vector(12)] -#[repr(C)] -pub struct svfloat32x3_t(f32); - -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat32x3_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat32x3_t { - fn clone(&self) -> Self { *self } +impl Clone for svuint16x3_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 32位有符号整数三向量 (x3) +/// SVE 32-bit signed integer triple vector (x3). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(12)] #[repr(C)] @@ -413,12 +518,15 @@ pub struct svint32x3_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint32x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint32x3_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 32位无符号整数三向量 (x3) +/// SVE 32-bit unsigned integer triple vector (x3). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(12)] #[repr(C)] @@ -426,51 +534,99 @@ pub struct svuint32x3_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint32x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint32x3_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 64位浮点三向量 (x3) +/// SVE 64-bit signed integer triple vector (x3). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(6)] #[repr(C)] -pub struct svfloat64x3_t(f64); +pub struct svint64x3_t(i64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat64x3_t {} +impl Copy for svint64x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat64x3_t { - fn clone(&self) -> Self { *self } +impl Clone for svint64x3_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 64位有符号整数三向量 (x3) +/// SVE 64-bit unsigned integer triple vector (x3). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(6)] #[repr(C)] -pub struct svint64x3_t(i64); +pub struct svuint64x3_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svint64x3_t {} +impl Copy for svuint64x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svint64x3_t { - fn clone(&self) -> Self { *self } +impl Clone for svuint64x3_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 64位无符号整数三向量 (x3) +/// SVE 32-bit floating-point triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(12)] +#[repr(C)] +pub struct svfloat32x3_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat32x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat32x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 64-bit floating-point triple vector (x3). 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(6)] #[repr(C)] -pub struct svuint64x3_t(u64); +pub struct svfloat64x3_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svuint64x3_t {} +impl Copy for svfloat64x3_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svuint64x3_t { - fn clone(&self) -> Self { *self } +impl Clone for svfloat64x3_t { + fn clone(&self) -> Self { + *self + } +} + +/// SVE 16-bit floating-point triple vector (x3). +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +#[rustc_scalable_vector(24)] +#[repr(C)] +pub struct svfloat16x3_t(f32); + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Copy for svfloat16x3_t {} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl Clone for svfloat16x3_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 8位有符号整数四向量 (x4) +// ============================================================================ +// SVE Vector Tuple Types - x4 (Quadruple Vectors) +// ============================================================================ + +/// SVE 8-bit signed integer quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(64)] #[repr(C)] @@ -478,12 +634,15 @@ pub struct svint8x4_t(i8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint8x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint8x4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 8位无符号整数四向量 (x4) +/// SVE 8-bit unsigned integer quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(64)] #[repr(C)] @@ -491,12 +650,15 @@ pub struct svuint8x4_t(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint8x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint8x4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位有符号整数四向量 (x4) +/// SVE 16-bit signed integer quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(32)] #[repr(C)] @@ -504,12 +666,15 @@ pub struct svint16x4_t(i16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint16x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint16x4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位无符号整数四向量 (x4) +/// SVE 16-bit unsigned integer quadruple vector (x4). 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(32)] #[repr(C)] @@ -517,25 +682,15 @@ pub struct svuint16x4_t(u16); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint16x4_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svuint16x4_t { - fn clone(&self) -> Self { *self } -} - -/// SVE 32位浮点四向量 (x4) -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[rustc_scalable_vector(16)] -#[repr(C)] -pub struct svfloat32x4_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat32x4_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat32x4_t { - fn clone(&self) -> Self { *self } +impl Clone for svuint16x4_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 32位有符号整数四向量 (x4) +/// SVE 32-bit signed integer quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(16)] #[repr(C)] @@ -543,12 +698,15 @@ pub struct svint32x4_t(i32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint32x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint32x4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 32位无符号整数四向量 (x4) +/// SVE 32-bit unsigned integer quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(16)] #[repr(C)] @@ -556,25 +714,15 @@ pub struct svuint32x4_t(u32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint32x4_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svuint32x4_t { - fn clone(&self) -> Self { *self } -} -/// SVE 64位浮点四向量 (x4) #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[rustc_scalable_vector(8)] -#[repr(C)] -pub struct svfloat64x4_t(f64); - -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat64x4_t {} -#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat64x4_t { - fn clone(&self) -> Self { *self } +impl Clone for svuint32x4_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 64位有符号整数四向量 (x4) +/// SVE 64-bit signed integer quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -582,12 +730,15 @@ pub struct svint64x4_t(i64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svint64x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svint64x4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 64位无符号整数四向量 (x4) +/// SVE 64-bit unsigned integer quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(8)] #[repr(C)] @@ -595,38 +746,47 @@ pub struct svuint64x4_t(u64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svuint64x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svuint64x4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } -/// SVE 16位浮点双向量 (x2) +/// SVE 32-bit floating-point quadruple vector (x4). 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(16)] #[repr(C)] -pub struct svfloat16x2_t(f32); +pub struct svfloat32x4_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat16x2_t {} +impl Copy for svfloat32x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat16x2_t { - fn clone(&self) -> Self { *self } +impl Clone for svfloat32x4_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 16位浮点三向量 (x3) +/// SVE 64-bit floating-point quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -#[rustc_scalable_vector(24)] +#[rustc_scalable_vector(8)] #[repr(C)] -pub struct svfloat16x3_t(f32); +pub struct svfloat64x4_t(f64); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Copy for svfloat16x3_t {} +impl Copy for svfloat64x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] -impl Clone for svfloat16x3_t { - fn clone(&self) -> Self { *self } +impl Clone for svfloat64x4_t { + fn clone(&self) -> Self { + *self + } } -/// SVE 16位浮点四向量 (x4) +/// SVE 16-bit floating-point quadruple vector (x4). #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[rustc_scalable_vector(32)] #[repr(C)] @@ -634,16 +794,19 @@ pub struct svfloat16x4_t(f32); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Copy for svfloat16x4_t {} + #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl Clone for svfloat16x4_t { - fn clone(&self) -> Self { *self } + fn clone(&self) -> Self { + *self + } } // ============================================================================ -// SVE 辅助类型 +// SVE Auxiliary Types // ============================================================================ -/// SVE模式类型 +/// SVE pattern type for vector length specification. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[repr(C)] #[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)] @@ -651,20 +814,21 @@ pub struct svpattern(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl svpattern { - /// 从原始字节创建模式值 + /// Create a pattern value from a raw byte. #[inline(always)] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub const fn from_raw(value: u8) -> Self { svpattern(value) } - /// 以原始字节形式返回模式值 + /// Return the pattern value as a raw byte. #[inline(always)] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub const fn as_raw(self) -> u8 { self.0 } + // Pattern constants #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub const SV_ALL: svpattern = svpattern(31); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -701,7 +865,7 @@ impl svpattern { pub const SV_MUL3: svpattern = svpattern(28); } -/// SVE预取操作类型 +/// SVE prefetch operation type. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] #[repr(C)] #[derive(Copy, Clone, PartialEq, Eq, Debug, core::marker::ConstParamTy)] @@ -709,20 +873,21 @@ pub struct svprfop(u8); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl svprfop { - /// 从原始字节创建预取操作值 + /// Create a prefetch operation value from a raw byte. #[inline(always)] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub const fn from_raw(value: u8) -> Self { svprfop(value) } - /// 以原始字节形式返回预取操作值 + /// Return the prefetch operation value as a raw byte. 
#[inline(always)] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub const fn as_raw(self) -> u8 { self.0 } + // Prefetch operation constants #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub const SV_PLDL1KEEP: svprfop = svprfop(0); #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -750,21 +915,18 @@ impl svprfop { } // ============================================================================ -// 类型转换辅助函数(仅用于内部) +// Predicate Type Conversion Methods // ============================================================================ -// 注意:simd_cast 函数定义在父模块 mod.rs 中,使用 transmute_copy 避免 E0511 错误 +// +// These methods provide conversion APIs similar to From::from but with +// #[target_feature(enable = "sve")] for cross-compilation support. +// The simd_cast function is defined in the parent module (mod.rs) and uses +// transmute_copy to avoid E0511 errors. -// ============================================================================ -// 类 From trait 的转换方法 - 适用于交叉编译 -// ============================================================================ - -// 方案:使用 Associated Functions 提供类似 From::from 的 API -// 这些方法添加了 #[target_feature(enable = "sve")],可以在交叉编译时正常工作 - -/// svbool_t 的转换方法 +/// Conversion methods for svbool_t. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl svbool_t { - /// 转换为 svbool2_t + /// Convert to svbool2_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -772,7 +934,7 @@ impl svbool_t { simd_cast(self) } - /// 转换为 svbool4_t + /// Convert to svbool4_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -780,7 +942,7 @@ impl svbool_t { simd_cast(self) } - /// 转换为 svbool8_t + /// Convert to svbool8_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -789,10 +951,10 @@ impl svbool_t { } } -/// svbool2_t 的转换方法 +/// Conversion methods for svbool2_t. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl svbool2_t { - /// 从 svbool_t 创建(类似 From::from) + /// Create from svbool_t (similar to From::from). #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -800,7 +962,7 @@ impl svbool2_t { simd_cast(x) } - /// 转换为 svbool_t + /// Convert to svbool_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -808,7 +970,7 @@ impl svbool2_t { simd_cast(self) } - /// 转换为 svbool4_t + /// Convert to svbool4_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -816,7 +978,7 @@ impl svbool2_t { simd_cast(self) } - /// 转换为 svbool8_t + /// Convert to svbool8_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -824,7 +986,7 @@ impl svbool2_t { simd_cast(self) } - /// 从 svbool4_t 创建 + /// Create from svbool4_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -832,7 +994,7 @@ impl svbool2_t { simd_cast(x) } - /// 从 svbool8_t 创建 + /// Create from svbool8_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -841,10 +1003,10 @@ impl svbool2_t { } } -/// svbool4_t 的转换方法 +/// Conversion methods for svbool4_t. 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl svbool4_t { - /// 从 svbool_t 创建(类似 From::from) + /// Create from svbool_t (similar to From::from). #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -852,7 +1014,7 @@ impl svbool4_t { simd_cast(x) } - /// 转换为 svbool_t + /// Convert to svbool_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -860,7 +1022,7 @@ impl svbool4_t { simd_cast(self) } - /// 转换为 svbool2_t + /// Convert to svbool2_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -868,7 +1030,7 @@ impl svbool4_t { simd_cast(self) } - /// 转换为 svbool8_t + /// Convert to svbool8_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -876,7 +1038,7 @@ impl svbool4_t { simd_cast(self) } - /// 从 svbool2_t 创建 + /// Create from svbool2_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -884,7 +1046,7 @@ impl svbool4_t { simd_cast(x) } - /// 从 svbool8_t 创建 + /// Create from svbool8_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -893,10 +1055,10 @@ impl svbool4_t { } } -/// svbool8_t 的转换方法 +/// Conversion methods for svbool8_t. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl svbool8_t { - /// 从 svbool_t 创建(类似 From::from) + /// Create from svbool_t (similar to From::from). #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -904,7 +1066,7 @@ impl svbool8_t { simd_cast(x) } - /// 转换为 svbool_t + /// Convert to svbool_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -912,7 +1074,7 @@ impl svbool8_t { simd_cast(self) } - /// 转换为 svbool2_t + /// Convert to svbool2_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -920,7 +1082,7 @@ impl svbool8_t { simd_cast(self) } - /// 转换为 svbool4_t + /// Convert to svbool4_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -928,7 +1090,7 @@ impl svbool8_t { simd_cast(self) } - /// 从 svbool2_t 创建 + /// Create from svbool2_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -936,7 +1098,7 @@ impl svbool8_t { simd_cast(x) } - /// 从 svbool4_t 创建 + /// Create from svbool4_t. #[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -946,10 +1108,13 @@ impl svbool8_t { } // ============================================================================ -// From trait 实现 - 用于生成的代码中的 .into() 调用 +// From Trait Implementations // ============================================================================ -// 注意:这些实现不使用 target_feature,因为 From trait 不能有该属性 -// 类型转换本身是安全的,不涉及实际的 SIMD 操作 +// +// These implementations are used for .into() calls in generated code. +// Note: These implementations do not use target_feature because the From +// trait cannot have that attribute. The type conversion itself is safe and +// does not involve actual SIMD operations. 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl From for svbool2_t { @@ -976,10 +1141,13 @@ impl From for svbool8_t { } // ============================================================================ -// 类型转换 Trait - 用于生成的代码 +// Type Conversion Traits // ============================================================================ +// +// These traits are used in generated code for converting between signed and +// unsigned vector types. -/// 转换为无符号向量类型 +/// Trait for converting to unsigned vector types. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub trait AsUnsigned { #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -988,7 +1156,7 @@ pub trait AsUnsigned { fn as_unsigned(self) -> Self::Unsigned; } -/// 转换为有符号向量类型 +/// Trait for converting to signed vector types. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub trait AsSigned { #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -997,12 +1165,18 @@ pub trait AsSigned { fn as_signed(self) -> Self::Signed; } -// 为所有 SVE 整数类型实现转换 trait +// ============================================================================ +// AsUnsigned and AsSigned Implementations - Single Vectors +// ============================================================================ + +// 8-bit types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint8_t { type Unsigned = svuint8_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1027,14 +1201,19 @@ impl AsUnsigned for svint8_t { impl AsSigned for svint8_t { type Signed = svint8_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 16-bit types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint16_t { type Unsigned = svuint16_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1059,14 +1238,19 @@ impl AsUnsigned for svint16_t { impl AsSigned for svint16_t { type Signed = svint16_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 32-bit types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint32_t { type Unsigned = svuint32_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1091,14 +1275,19 @@ impl AsUnsigned for svint32_t { impl AsSigned for svint32_t { type Signed = svint32_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 64-bit types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint64_t { type Unsigned = svuint64_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1123,14 +1312,23 @@ impl AsUnsigned for svint64_t { impl AsSigned for svint64_t { type Signed = svint64_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 
============================================================================ +// AsUnsigned and AsSigned Implementations - x2 Tuple Vectors +// ============================================================================ + +// 8-bit x2 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint8x2_t { type Unsigned = svuint8x2_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1155,14 +1353,19 @@ impl AsUnsigned for svint8x2_t { impl AsSigned for svint8x2_t { type Signed = svint8x2_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 16-bit x2 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint16x2_t { type Unsigned = svuint16x2_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1187,14 +1390,19 @@ impl AsUnsigned for svint16x2_t { impl AsSigned for svint16x2_t { type Signed = svint16x2_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 32-bit x2 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint32x2_t { type Unsigned = svuint32x2_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1219,14 +1427,19 @@ impl AsUnsigned for svint32x2_t { impl AsSigned for svint32x2_t { type Signed = svint32x2_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 64-bit x2 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint64x2_t { type Unsigned = svuint64x2_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1251,14 +1464,23 @@ impl AsUnsigned for svint64x2_t { impl AsSigned for svint64x2_t { type Signed = svint64x2_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// ============================================================================ +// AsUnsigned and AsSigned Implementations - x3 Tuple Vectors +// ============================================================================ + +// 8-bit x3 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint8x3_t { type Unsigned = svuint8x3_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1283,14 +1505,19 @@ impl AsUnsigned for svint8x3_t { impl AsSigned for svint8x3_t { type Signed = svint8x3_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 16-bit x3 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint16x3_t { type Unsigned = svuint16x3_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = 
"stdarch_aarch64_sve", issue = "none")] @@ -1315,14 +1542,19 @@ impl AsUnsigned for svint16x3_t { impl AsSigned for svint16x3_t { type Signed = svint16x3_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 32-bit x3 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint32x3_t { type Unsigned = svuint32x3_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1347,14 +1579,19 @@ impl AsUnsigned for svint32x3_t { impl AsSigned for svint32x3_t { type Signed = svint32x3_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 64-bit x3 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint64x3_t { type Unsigned = svuint64x3_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1379,14 +1616,23 @@ impl AsUnsigned for svint64x3_t { impl AsSigned for svint64x3_t { type Signed = svint64x3_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// ============================================================================ +// AsUnsigned and AsSigned Implementations - x4 Tuple Vectors +// ============================================================================ + +// 8-bit x4 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint8x4_t { type Unsigned = svuint8x4_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1411,14 +1657,19 @@ impl AsUnsigned for svint8x4_t { impl AsSigned for svint8x4_t { type Signed = svint8x4_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 16-bit x4 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint16x4_t { type Unsigned = svuint16x4_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1443,14 +1694,19 @@ impl AsUnsigned for svint16x4_t { impl AsSigned for svint16x4_t { type Signed = svint16x4_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 32-bit x4 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint32x4_t { type Unsigned = svuint32x4_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1475,14 +1731,19 @@ impl AsUnsigned for svint32x4_t { impl AsSigned for svint32x4_t { type Signed = svint32x4_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } +// 64-bit x4 types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl AsUnsigned for svuint64x4_t { type Unsigned = svuint64x4_t; #[inline(always)] - fn as_unsigned(self) -> Self::Unsigned { self } + fn as_unsigned(self) -> 
Self::Unsigned { + self + } } #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1507,15 +1768,19 @@ impl AsUnsigned for svint64x4_t { impl AsSigned for svint64x4_t { type Signed = svint64x4_t; #[inline(always)] - fn as_signed(self) -> Self::Signed { self } + fn as_signed(self) -> Self::Signed { + self + } } // ============================================================================ -// LLVM 类型别名 - 用于生成的代码 +// LLVM Type Aliases // ============================================================================ -// 这些类型别名将 LLVM 机器表示映射到 Rust 类型,用于代码生成器生成的代码 +// +// These type aliases map LLVM machine representations to Rust types for use +// in code generated by the code generator. -// 有符号整数类型别名 +// Signed integer type aliases #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub type nxv8i8 = svint8_t; @@ -1534,7 +1799,7 @@ pub type nxv2i16 = svint16_t; #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub type nxv2i32 = svint32_t; -// 无符号整数类型别名 +// Unsigned integer type aliases #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub type nxv8u8 = svuint8_t; @@ -1552,5 +1817,3 @@ pub type nxv2u16 = svuint16_t; #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub type nxv2u32 = svuint32_t; - -// ============================================================================ From 2bdd4d79826bfab15d088157c59b619b48e7835e Mon Sep 17 00:00:00 2001 From: wxh Date: Mon, 24 Nov 2025 12:23:54 +0800 Subject: [PATCH 27/27] Enhance SVE module: Add predicate generation functions and improve type conversion documentation. Refactor existing conversion implementations for clarity and consistency. --- .../crates/core_arch/src/aarch64/sve/mod.rs | 75 ++++--------------- .../crates/core_arch/src/aarch64/sve/types.rs | 74 ++++++++++++++++-- 2 files changed, 85 insertions(+), 64 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs index 63d1a71ef7d74..14376777b2d43 100755 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -15,6 +15,10 @@ use types::*; // ============================================================================ // Type Conversion Utilities // ============================================================================ +// +// These utility functions provide low-level type conversion capabilities +// for SVE types. They use transmute_copy for bit-level reinterpretation +// to avoid triggering E0511 errors with non-SIMD types. /// Bit-level reinterpretation for SVE types. /// @@ -79,7 +83,7 @@ unsafe extern "C" { fn __llvm_sve_sel_nxv16i1(mask: svbool_t, a: svbool_t, b: svbool_t) -> svbool_t; } -// Implementation for signed integer types +// Signed integer type implementations impl __SveSelect for svint8_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { @@ -108,7 +112,7 @@ impl __SveSelect for svint64_t { } } -// Implementation for unsigned integer types +// Unsigned integer type implementations // Note: svuint*_t and svint*_t share the same LLVM intrinsic at the same width // since they have identical layouts in LLVM. 
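A sketch of why one intrinsic per element width suffices, relying only on the layout identity noted above (`sel_u8_via_i8` is an illustrative helper; the real impl bodies for the unsigned types sit outside this hunk):

    #[inline(always)]
    unsafe fn sel_u8_via_i8(mask: svbool_t, a: svuint8_t, b: svuint8_t) -> svuint8_t {
        // Reinterpret the unsigned operands as their signed counterparts,
        // run the signed select, and reinterpret the result back.
        let ai: svint8_t = unsafe { core::mem::transmute_copy(&a) };
        let bi: svint8_t = unsafe { core::mem::transmute_copy(&b) };
        let r = unsafe { <svint8_t as __SveSelect>::sel(mask, ai, bi) };
        unsafe { core::mem::transmute_copy(&r) }
    }
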
impl __SveSelect for svuint8_t { @@ -155,7 +159,7 @@ impl __SveSelect for svuint64_t { } } -// Implementation for floating-point types +// Floating-point type implementations impl __SveSelect for svfloat32_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { @@ -170,7 +174,7 @@ impl __SveSelect for svfloat64_t { } } -// Implementation for predicate type (1-bit predicate vector, nxv16i1) +// Predicate type implementation (1-bit predicate vector, nxv16i1) impl __SveSelect for svbool_t { #[inline(always)] unsafe fn sel(mask: svbool_t, a: Self, b: Self) -> Self { @@ -183,35 +187,6 @@ impl __SveSelect for svbool_t { // impl __SveSelect for svbfloat16_t { ... } // impl __SveSelect for svmfloat8_t { ... } -// ============================================================================ -// Predicate Type Conversions -// ============================================================================ -// -// These implementations use transmute_copy for bit-level conversion. -// No target feature is required since transmute_copy is a pure bit-level -// operation that doesn't involve SVE instructions. - -impl From for svbool_t { - #[inline(always)] - fn from(x: svbool2_t) -> Self { - unsafe { core::mem::transmute_copy(&x) } - } -} - -impl From for svbool_t { - #[inline(always)] - fn from(x: svbool4_t) -> Self { - unsafe { core::mem::transmute_copy(&x) } - } -} - -impl From for svbool_t { - #[inline(always)] - fn from(x: svbool8_t) -> Self { - unsafe { core::mem::transmute_copy(&x) } - } -} - // ============================================================================ // Public Select API // ============================================================================ @@ -232,11 +207,15 @@ where } // ============================================================================ -// Scalar Type Conversion Traits +// Scalar and Pointer Type Conversion Traits // ============================================================================ +// +// These traits provide conversion capabilities for scalar types and pointers +// used in SVE API implementations. They enable seamless conversion between +// signed and unsigned representations. /// Trait for converting between signed and unsigned scalar types. -trait ScalarConversion: Sized { +pub(crate) trait ScalarConversion: Sized { type Unsigned; type Signed; fn as_unsigned(self) -> Self::Unsigned; @@ -365,10 +344,7 @@ impl ScalarConversion for u64 { } } -// ============================================================================ -// Pointer Type Conversions -// ============================================================================ - +// Pointer type conversions are implemented via macro below macro_rules! impl_scalar_conversion_for_ptr { ($(($unsigned:ty, $signed:ty)),*) => { $( @@ -438,7 +414,7 @@ macro_rules! 
impl_scalar_conversion_for_ptr { impl_scalar_conversion_for_ptr!((u8, i8), (u16, i16), (u32, i32), (u64, i64)); // ============================================================================ -// Public Exports +// Public Module Exports // ============================================================================ #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -449,22 +425,3 @@ pub use sve2::*; #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] pub use types::*; - -// ============================================================================ -// LLVM Intrinsics and Public APIs -// ============================================================================ - -unsafe extern "C" { - #[link_name = "llvm.aarch64.sve.whilelt"] - fn __llvm_sve_whilelt_i32(i: i32, n: i32) -> svbool_t; -} - -/// Generate a predicate for while less-than comparison. -/// -/// Note: The svcntw() function is defined in sve.rs with the correct -/// LLVM intrinsic function signature. -#[inline] -#[target_feature(enable = "sve")] -pub unsafe fn svwhilelt_b32(i: i32, n: i32) -> svbool_t { - __llvm_sve_whilelt_i32(i, n) -} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs index d6f754c1f783c..c9260b6c13774 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/types.rs @@ -74,6 +74,37 @@ impl Clone for svbool8_t { } } +// ============================================================================ +// Predicate Generation Functions +// ============================================================================ +// +// These functions generate predicate vectors for loop control and conditional +// operations. They provide convenient wrappers around LLVM SVE intrinsics. + +unsafe extern "C" { + #[link_name = "llvm.aarch64.sve.whilelt"] + fn __llvm_sve_whilelt_i32(i: i32, n: i32) -> svbool_t; +} + +/// Generate a predicate for while less-than comparison. +/// +/// This function generates a predicate vector where each element is true +/// if the corresponding index (starting from `i`) is less than `n`. +/// +/// This is a convenience wrapper for loop control in SVE code. For more +/// specific variants (e.g., `svwhilelt_b32_s32`), see the functions in +/// the `sve` module. +/// +/// # Safety +/// +/// This function is marked unsafe because it requires the `sve` target feature. +#[inline] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +pub unsafe fn svwhilelt_b32(i: i32, n: i32) -> svbool_t { + __llvm_sve_whilelt_i32(i, n) +} + // ============================================================================ // SVE Vector Types - Signed Integers // ============================================================================ @@ -922,6 +953,8 @@ impl svprfop { // #[target_feature(enable = "sve")] for cross-compilation support. // The simd_cast function is defined in the parent module (mod.rs) and uses // transmute_copy to avoid E0511 errors. +// +// Note: These methods are organized by the source type for clarity. /// Conversion methods for svbool_t. 
#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1108,7 +1141,7 @@ impl svbool8_t { } // ============================================================================ -// From Trait Implementations +// From Trait Implementations for Predicate Types // ============================================================================ // // These implementations are used for .into() calls in generated code. @@ -1116,6 +1149,7 @@ impl svbool8_t { // trait cannot have that attribute. The type conversion itself is safe and // does not involve actual SIMD operations. +// Conversions from svbool_t to wider predicate types #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] impl From for svbool2_t { #[inline(always)] @@ -1140,12 +1174,41 @@ impl From for svbool8_t { } } +// Conversions from wider predicate types to svbool_t +// These implementations use transmute_copy for bit-level conversion. +// No target feature is required since transmute_copy is a pure bit-level +// operation that doesn't involve SVE instructions. +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl From for svbool_t { + #[inline(always)] + fn from(x: svbool2_t) -> Self { + unsafe { core::mem::transmute_copy(&x) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl From for svbool_t { + #[inline(always)] + fn from(x: svbool4_t) -> Self { + unsafe { core::mem::transmute_copy(&x) } + } +} + +#[unstable(feature = "stdarch_aarch64_sve", issue = "none")] +impl From for svbool_t { + #[inline(always)] + fn from(x: svbool8_t) -> Self { + unsafe { core::mem::transmute_copy(&x) } + } +} + // ============================================================================ -// Type Conversion Traits +// Vector Type Conversion Traits // ============================================================================ // // These traits are used in generated code for converting between signed and -// unsigned vector types. +// unsigned vector types. They provide a consistent API for type conversions +// across all SVE vector types (single vectors and tuple vectors). /// Trait for converting to unsigned vector types. #[unstable(feature = "stdarch_aarch64_sve", issue = "none")] @@ -1777,8 +1840,9 @@ impl AsSigned for svint64x4_t { // LLVM Type Aliases // ============================================================================ // -// These type aliases map LLVM machine representations to Rust types for use -// in code generated by the code generator. +// These type aliases map LLVM machine representations (nxv* types) to Rust +// SVE types. They are used by the code generator to match LLVM intrinsic +// signatures with Rust type definitions. // Signed integer type aliases #[unstable(feature = "stdarch_aarch64_sve", issue = "none")]