[llvm] 6f4d9d1 - Revert "[AArch64][SVE] Add intrinsics for SVE2 bitwise ternary operations"

Nico Weber via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 20 12:11:37 PST 2020


Author: Nico Weber
Date: 2020-02-20T15:11:13-05:00
New Revision: 6f4d9d10293d8e93d7cf3397e3a82ac19c8b629f

URL: https://github.com/llvm/llvm-project/commit/6f4d9d10293d8e93d7cf3397e3a82ac19c8b629f
DIFF: https://github.com/llvm/llvm-project/commit/6f4d9d10293d8e93d7cf3397e3a82ac19c8b629f.diff

LOG: Revert "[AArch64][SVE] Add intrinsics for SVE2 bitwise ternary operations"

This reverts commit ce70e2899879e092b153a4078b993833b6696713.
It broke MC/AArch64/SVE2/bsl-diagnostics.s everywhere.
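For context: the reverted intrinsics wrap the six SVE2 bitwise ternary
instructions. A minimal C sketch of their per-bit semantics, assuming the
operand roles below match the Arm SVE2 specification (in particular, that
Zk supplies the select mask for the BSL family); verify against the
architecture manual before relying on it:

    #include <stdint.h>

    /* Per-bit semantics, modelled on 64-bit scalars; the instructions
     * apply the same operation to every bit of the Z registers. */
    static inline uint64_t eor3(uint64_t zd, uint64_t zm, uint64_t zk)  { return zd ^ zm ^ zk; }
    static inline uint64_t bcax(uint64_t zd, uint64_t zm, uint64_t zk)  { return zd ^ (zm & ~zk); }
    static inline uint64_t bsl(uint64_t zd, uint64_t zm, uint64_t zk)   { return (zd & zk) | (zm & ~zk); }
    static inline uint64_t bsl1n(uint64_t zd, uint64_t zm, uint64_t zk) { return (~zd & zk) | (zm & ~zk); }
    static inline uint64_t bsl2n(uint64_t zd, uint64_t zm, uint64_t zk) { return (zd & zk) | (~zm & ~zk); }
    static inline uint64_t nbsl(uint64_t zd, uint64_t zm, uint64_t zk)  { return ~((zd & zk) | (zm & ~zk)); }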

Added: 
    

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td

Removed: 
    llvm/test/CodeGen/AArch64/sve2-bitwise-ternary.ll


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index a3234b3bdd5a..a3fb5fe840a4 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -2001,17 +2001,10 @@ def int_aarch64_sve_sbclt       : AdvSIMD_3VectorArg_Intrinsic;
 //
 // SVE2 - Polynomial arithmetic
 //
+
 def int_aarch64_sve_eorbt       : AdvSIMD_3VectorArg_Intrinsic;
 def int_aarch64_sve_eortb       : AdvSIMD_3VectorArg_Intrinsic;
 def int_aarch64_sve_pmullb_pair : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_pmullt_pair : AdvSIMD_2VectorArg_Intrinsic;
 
-// SVE2 bitwise ternary operations.
-def int_aarch64_sve_eor3   : AdvSIMD_3VectorArg_Intrinsic;
-def int_aarch64_sve_bcax   : AdvSIMD_3VectorArg_Intrinsic;
-def int_aarch64_sve_bsl    : AdvSIMD_3VectorArg_Intrinsic;
-def int_aarch64_sve_bsl1n  : AdvSIMD_3VectorArg_Intrinsic;
-def int_aarch64_sve_bsl2n  : AdvSIMD_3VectorArg_Intrinsic;
-def int_aarch64_sve_nbsl   : AdvSIMD_3VectorArg_Intrinsic;
-
 }

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 59c478a3a386..f8819c6b9b14 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1771,12 +1771,12 @@ let Predicates = [HasSVE2] in {
   defm FMLSLT_ZZZ_SHH : sve2_fp_mla_long<0b11, "fmlslt", int_aarch64_sve_fmlslt>;
 
   // SVE2 bitwise ternary operations
-  defm EOR3_ZZZZ  : sve2_int_bitwise_ternary_op<0b000, "eor3",  int_aarch64_sve_eor3>;
-  defm BCAX_ZZZZ  : sve2_int_bitwise_ternary_op<0b010, "bcax",  int_aarch64_sve_bcax>;
-  defm BSL_ZZZZ   : sve2_int_bitwise_ternary_op<0b001, "bsl",   int_aarch64_sve_bsl>;
-  defm BSL1N_ZZZZ : sve2_int_bitwise_ternary_op<0b011, "bsl1n", int_aarch64_sve_bsl1n>;
-  defm BSL2N_ZZZZ : sve2_int_bitwise_ternary_op<0b101, "bsl2n", int_aarch64_sve_bsl2n>;
-  defm NBSL_ZZZZ  : sve2_int_bitwise_ternary_op<0b111, "nbsl",  int_aarch64_sve_nbsl>;
+  defm EOR3_ZZZZ_D  : sve2_int_bitwise_ternary_op<0b000, "eor3">;
+  defm BCAX_ZZZZ_D  : sve2_int_bitwise_ternary_op<0b010, "bcax">;
+  def BSL_ZZZZ_D    : sve2_int_bitwise_ternary_op_d<0b001, "bsl">;
+  def BSL1N_ZZZZ_D  : sve2_int_bitwise_ternary_op_d<0b011, "bsl1n">;
+  def BSL2N_ZZZZ_D  : sve2_int_bitwise_ternary_op_d<0b101, "bsl2n">;
+  def NBSL_ZZZZ_D   : sve2_int_bitwise_ternary_op_d<0b111, "nbsl">;
 
   // SVE2 bitwise xor and rotate right by immediate
   defm XAR_ZZZI : sve2_int_rotate_right_imm<"xar">;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 8c02b3a95dfe..757743084721 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3766,7 +3766,7 @@ class sve2_int_bitwise_ternary_op_d<bits<3> opc, string asm>
   let ElementSize = ElementSizeNone;
 }
 
-multiclass sve2_int_bitwise_ternary_op<bits<3> opc, string asm, SDPatternOperator op> {
+multiclass sve2_int_bitwise_ternary_op<bits<3> opc, string asm> {
   def NAME : sve2_int_bitwise_ternary_op_d<opc, asm>;
 
   def : InstAlias<asm # "\t$Zdn, $Zdn, $Zm, $Zk",
@@ -3775,11 +3775,6 @@ multiclass sve2_int_bitwise_ternary_op<bits<3> opc, string asm, SDPatternOperato
                   (!cast<Instruction>(NAME) ZPR16:$Zdn, ZPR16:$Zm, ZPR16:$Zk), 1>;
   def : InstAlias<asm # "\t$Zdn, $Zdn, $Zm, $Zk",
                   (!cast<Instruction>(NAME) ZPR32:$Zdn, ZPR32:$Zm, ZPR32:$Zk), 1>;
-
-  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME)>;
-  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME)>;
-  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME)>;
-  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME)>;
 }
 
 class sve2_int_rotate_right_imm<bits<4> tsz8_64, string asm,
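
SVE_3_Op_Pat is the ISel glue that matches an intrinsic call at the given
vector types and emits the instruction, so deleting the four instantiations
above is what detaches the eor3/bcax/bsl family from code generation. A
rough sketch of its shape, reconstructed from its call sites here rather
than quoted from SVEInstrFormats.td (treat the exact parameter list as an
assumption):

    // Reconstructed sketch, not the verbatim class definition:
    // match (op a, b, c) at the given types, emit `inst a, b, c`.
    class SVE_3_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                       ValueType vt2, ValueType vt3, Instruction inst>
      : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
            (inst $Op1, $Op2, $Op3)>;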

diff --git a/llvm/test/CodeGen/AArch64/sve2-bitwise-ternary.ll b/llvm/test/CodeGen/AArch64/sve2-bitwise-ternary.ll
deleted file mode 100644
index 8745f7f96e64..000000000000
--- a/llvm/test/CodeGen/AArch64/sve2-bitwise-ternary.ll
+++ /dev/null
@@ -1,284 +0,0 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
-
-;
-; EOR3 (vector, bitwise, unpredicated)
-;
-define <vscale x 16 x i8> @eor3_i8(<vscale x 16 x i8> %a,
-                                   <vscale x 16 x i8> %b,
-                                   <vscale x 16 x i8> %c) {
-; CHECK-LABEL: eor3_i8
-; CHECK: eor3 z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
-  ret <vscale x 16 x i8> %res
-}
-
-define <vscale x 8 x i16> @eor3_i16(<vscale x 8 x i16> %a,
-                                    <vscale x 8 x i16> %b,
-                                    <vscale x 8 x i16> %c) {
-; CHECK-LABEL: eor3_i16
-; CHECK: eor3 z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.eor3.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @eor3_i32(<vscale x 4 x i32> %a,
-                                    <vscale x 4 x i32> %b,
-                                    <vscale x 4 x i32> %c) {
-; CHECK-LABEL: eor3_i32
-; CHECK: eor3 z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.eor3.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @eor3_i64(<vscale x 2 x i64> %a,
-                                    <vscale x 2 x i64> %b,
-                                    <vscale x 2 x i64> %c) {
-; CHECK-LABEL: eor3_i64
-; CHECK: eor3 z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.eor3.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
-  ret <vscale x 2 x i64> %res
-}
-
-;
-; BCAX (vector, bitwise, unpredicated)
-;
-define <vscale x 16 x i8> @bcax_i8(<vscale x 16 x i8> %a,
-                                   <vscale x 16 x i8> %b,
-                                   <vscale x 16 x i8> %c) {
-; CHECK-LABEL: bcax_i8
-; CHECK: bcax z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.bcax.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
-  ret <vscale x 16 x i8> %res
-}
-
-define <vscale x 8 x i16> @bcax_i16(<vscale x 8 x i16> %a,
-                                    <vscale x 8 x i16> %b,
-                                    <vscale x 8 x i16> %c) {
-; CHECK-LABEL: bcax_i16
-; CHECK: bcax z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.bcax.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @bcax_i32(<vscale x 4 x i32> %a,
-                                    <vscale x 4 x i32> %b,
-                                    <vscale x 4 x i32> %c) {
-; CHECK-LABEL: bcax_i32
-; CHECK: bcax z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.bcax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @bcax_i64(<vscale x 2 x i64> %a,
-                                    <vscale x 2 x i64> %b,
-                                    <vscale x 2 x i64> %c) {
-; CHECK-LABEL: bcax_i64
-; CHECK: bcax z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
-  ret <vscale x 2 x i64> %res
-}
-
-;
-; BSL (vector, bitwise, unpredicated)
-;
-define <vscale x 16 x i8> @bsl_i8(<vscale x 16 x i8> %a,
-                                  <vscale x 16 x i8> %b,
-                                  <vscale x 16 x i8> %c) {
-; CHECK-LABEL: bsl_i8
-; CHECK: bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
-  ret <vscale x 16 x i8> %res
-}
-
-define <vscale x 8 x i16> @bsl_i16(<vscale x 8 x i16> %a,
-                                   <vscale x 8 x i16> %b,
-                                   <vscale x 8 x i16> %c) {
-; CHECK-LABEL: bsl_i16
-; CHECK: bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @bsl_i32(<vscale x 4 x i32> %a,
-                                   <vscale x 4 x i32> %b,
-                                   <vscale x 4 x i32> %c) {
-; CHECK-LABEL: bsl_i32
-; CHECK: bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @bsl_i64(<vscale x 2 x i64> %a,
-                                   <vscale x 2 x i64> %b,
-                                   <vscale x 2 x i64> %c) {
-; CHECK-LABEL: bsl_i64
-; CHECK: bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
-  ret <vscale x 2 x i64> %res
-}
-
-;
-; BSL1N (vector, bitwise, unpredicated)
-;
-define <vscale x 16 x i8> @bsl1n_i8(<vscale x 16 x i8> %a,
-                                    <vscale x 16 x i8> %b,
-                                    <vscale x 16 x i8> %c) {
-; CHECK-LABEL: bsl1n_i8
-; CHECK: bsl1n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl1n.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
-  ret <vscale x 16 x i8> %res
-}
-
-define <vscale x 8 x i16> @bsl1n_i16(<vscale x 8 x i16> %a,
-                                     <vscale x 8 x i16> %b,
-                                     <vscale x 8 x i16> %c) {
-; CHECK-LABEL: bsl1n_i16
-; CHECK: bsl1n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl1n.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @bsl1n_i32(<vscale x 4 x i32> %a,
-                                     <vscale x 4 x i32> %b,
-                                     <vscale x 4 x i32> %c) {
-; CHECK-LABEL: bsl1n_i32
-; CHECK: bsl1n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl1n.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @bsl1n_i64(<vscale x 2 x i64> %a,
-                                     <vscale x 2 x i64> %b,
-                                     <vscale x 2 x i64> %c) {
-; CHECK-LABEL: bsl1n_i64
-; CHECK: bsl1n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl1n.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
-  ret <vscale x 2 x i64> %res
-}
-
-;
-; BSL2N (vector, bitwise, unpredicated)
-;
-define <vscale x 16 x i8> @bsl2n_i8(<vscale x 16 x i8> %a,
-                                    <vscale x 16 x i8> %b,
-                                    <vscale x 16 x i8> %c) {
-; CHECK-LABEL: bsl2n_i8
-; CHECK: bsl2n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl2n.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
-  ret <vscale x 16 x i8> %res
-}
-
-define <vscale x 8 x i16> @bsl2n_i16(<vscale x 8 x i16> %a,
-                                     <vscale x 8 x i16> %b,
-                                     <vscale x 8 x i16> %c) {
-; CHECK-LABEL: bsl2n_i16
-; CHECK: bsl2n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl2n.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @bsl2n_i32(<vscale x 4 x i32> %a,
-                                     <vscale x 4 x i32> %b,
-                                     <vscale x 4 x i32> %c) {
-; CHECK-LABEL: bsl2n_i32
-; CHECK: bsl2n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl2n.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @bsl2n_i64(<vscale x 2 x i64> %a,
-                                     <vscale x 2 x i64> %b,
-                                     <vscale x 2 x i64> %c) {
-; CHECK-LABEL: bsl2n_i64
-; CHECK: bsl2n z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl2n.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
-  ret <vscale x 2 x i64> %res
-}
-
-;
-; NBSL (vector, bitwise, unpredicated)
-;
-define <vscale x 16 x i8> @nbsl_i8(<vscale x 16 x i8> %a,
-                                   <vscale x 16 x i8> %b,
-                                   <vscale x 16 x i8> %c) {
-; CHECK-LABEL: nbsl_i8
-; CHECK: nbsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.nbsl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
-  ret <vscale x 16 x i8> %res
-}
-
-define <vscale x 8 x i16> @nbsl_i16(<vscale x 8 x i16> %a,
-                                    <vscale x 8 x i16> %b,
-                                    <vscale x 8 x i16> %c) {
-; CHECK-LABEL: nbsl_i16
-; CHECK: nbsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.nbsl.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @nbsl_i32(<vscale x 4 x i32> %a,
-                                    <vscale x 4 x i32> %b,
-                                    <vscale x 4 x i32> %c) {
-; CHECK-LABEL: nbsl_i32
-; CHECK: nbsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.nbsl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @nbsl_i64(<vscale x 2 x i64> %a,
-                                    <vscale x 2 x i64> %b,
-                                    <vscale x 2 x i64> %c) {
-; CHECK-LABEL: nbsl_i64
-; CHECK: nbsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.nbsl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
-  ret <vscale x 2 x i64> %res
-}
-
-declare <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.eor3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.eor3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.eor3.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.bcax.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.bcax.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.bcax.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.bsl.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.bsl.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.bsl.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.bsl.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.bsl1n.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.bsl1n.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.bsl1n.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.bsl1n.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.bsl2n.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.bsl2n.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.bsl2n.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.bsl2n.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.aarch64.sve.nbsl.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.nbsl.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.nbsl.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.nbsl.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>)
