[llvm] d95cf1f - [SVE] Enable ISD::ABDS/U ISel for scalable vectors.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 25 04:32:03 PST 2022


Author: Paul Walker
Date: 2022-01-25T12:14:53Z
New Revision: d95cf1f6cf4242ae9f045b8032b9e4c08d41a12f

URL: https://github.com/llvm/llvm-project/commit/d95cf1f6cf4242ae9f045b8032b9e4c08d41a12f
DIFF: https://github.com/llvm/llvm-project/commit/d95cf1f6cf4242ae9f045b8032b9e4c08d41a12f.diff

LOG: [SVE] Enable ISD::ABDS/U ISel for scalable vectors.

NOTE: This patch also includes tests that highlight those cases
where the existing DAG combine doesn't yet work well for SVE.

Differential Revision: https://reviews.llvm.org/D117873
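
For reference, ISD::ABDS/ABDU nodes are formed by the generic DAG combine
from the extend/sub/abs(/truncate) idiom exercised by the new tests, for
example (mirroring the sabd_b test added below):

  %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
  %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
  %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>

With this patch the combined node is lowered to a predicated
AArch64ISD::ABDS_PRED/ABDU_PRED node and selected as a single sabd/uabd
instruction.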

Added: 
    llvm/test/CodeGen/AArch64/sve-abd.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fd35ab2049e92..fc6e0b865681c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1206,6 +1206,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::SRL, VT, Custom);
       setOperationAction(ISD::SRA, VT, Custom);
       setOperationAction(ISD::ABS, VT, Custom);
+      setOperationAction(ISD::ABDS, VT, Custom);
+      setOperationAction(ISD::ABDU, VT, Custom);
       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
@@ -1994,6 +1996,8 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
     MAKE_CASE(AArch64ISD::CSINC)
     MAKE_CASE(AArch64ISD::THREAD_POINTER)
     MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
+    MAKE_CASE(AArch64ISD::ABDS_PRED)
+    MAKE_CASE(AArch64ISD::ABDU_PRED)
     MAKE_CASE(AArch64ISD::ADD_PRED)
     MAKE_CASE(AArch64ISD::MUL_PRED)
     MAKE_CASE(AArch64ISD::MULHS_PRED)
@@ -5196,6 +5200,10 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
     return LowerFixedLengthVectorSelectToSVE(Op, DAG);
   case ISD::ABS:
     return LowerABS(Op, DAG);
+  case ISD::ABDS:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED);
+  case ISD::ABDU:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
   case ISD::BITREVERSE:
     return LowerBitreverse(Op, DAG);
   case ISD::BSWAP:

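For context, LowerToPredicatedOp rebuilds the operation in its predicated
form under an all-active governing predicate for scalable types. A
simplified sketch, not the verbatim implementation (the real code also
handles fixed-length vectors):

  // Sketch only: assumes the scalable-vector path of LowerToPredicatedOp.
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Pg = getPredicateForVector(DAG, DL, VT); // all lanes active
  return DAG.getNode(NewOp /* e.g. AArch64ISD::ABDS_PRED */, DL, VT, Pg,
                     Op.getOperand(0), Op.getOperand(1));
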
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 9841a4c048632..df19a4729bb49 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -77,6 +77,8 @@ enum NodeType : unsigned {
   SBC, // adc, sbc instructions
 
   // Predicated instructions where inactive lanes produce undefined results.
+  ABDS_PRED,
+  ABDU_PRED,
   ADD_PRED,
   FADD_PRED,
   FDIV_PRED,

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 63f8f58e76c53..63cd8f476272a 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -189,11 +189,13 @@ def AArch64fsub_p : SDNode<"AArch64ISD::FSUB_PRED", SDT_AArch64Arith>;
 def AArch64lsl_p  : SDNode<"AArch64ISD::SHL_PRED",  SDT_AArch64Arith>;
 def AArch64lsr_p  : SDNode<"AArch64ISD::SRL_PRED",  SDT_AArch64Arith>;
 def AArch64mul_p  : SDNode<"AArch64ISD::MUL_PRED",  SDT_AArch64Arith>;
+def AArch64sabd_p : SDNode<"AArch64ISD::ABDS_PRED", SDT_AArch64Arith>;
 def AArch64sdiv_p : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64Arith>;
 def AArch64smax_p : SDNode<"AArch64ISD::SMAX_PRED", SDT_AArch64Arith>;
 def AArch64smin_p : SDNode<"AArch64ISD::SMIN_PRED", SDT_AArch64Arith>;
 def AArch64smulh_p : SDNode<"AArch64ISD::MULHS_PRED", SDT_AArch64Arith>;
 def AArch64sub_p  : SDNode<"AArch64ISD::SUB_PRED",  SDT_AArch64Arith>;
+def AArch64uabd_p : SDNode<"AArch64ISD::ABDU_PRED", SDT_AArch64Arith>;
 def AArch64udiv_p : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64Arith>;
 def AArch64umax_p : SDNode<"AArch64ISD::UMAX_PRED", SDT_AArch64Arith>;
 def AArch64umin_p : SDNode<"AArch64ISD::UMIN_PRED", SDT_AArch64Arith>;
@@ -418,6 +420,8 @@ let Predicates = [HasSVEorStreamingSVE] in {
   defm UMAX_ZPZZ : sve_int_bin_pred_bhsd<AArch64umax_p>;
   defm SMIN_ZPZZ : sve_int_bin_pred_bhsd<AArch64smin_p>;
   defm UMIN_ZPZZ : sve_int_bin_pred_bhsd<AArch64umin_p>;
+  defm SABD_ZPZZ : sve_int_bin_pred_bhsd<AArch64sabd_p>;
+  defm UABD_ZPZZ : sve_int_bin_pred_bhsd<AArch64uabd_p>;
 
   defm FRECPE_ZZ  : sve_fp_2op_u_zd<0b110, "frecpe",  AArch64frecpe>;
   defm FRSQRTE_ZZ : sve_fp_2op_u_zd<0b111, "frsqrte", AArch64frsqrte>;

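The new SABD_ZPZZ/UABD_ZPZZ definitions reuse the existing
sve_int_bin_pred_bhsd multiclass, which (roughly speaking) instantiates an
undef-passthru pseudo plus a selection pattern for each element size
(b/h/s/d), so a node such as

  (AArch64sabd_p nxv16i1:$Pg, nxv16i8:$Zn, nxv16i8:$Zm)

is selected to the predicated SABD instruction seen in the tests below.
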
diff --git a/llvm/test/CodeGen/AArch64/sve-abd.ll b/llvm/test/CodeGen/AArch64/sve-abd.ll
new file mode 100644
index 0000000000000..affd6d5b15f79
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-abd.ll
@@ -0,0 +1,267 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; SABD
+;
+
+define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+; CHECK-LABEL: sabd_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    sabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+  %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+  %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
+  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
+; CHECK-LABEL: sabd_b_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ptrue p2.b
+; CHECK-NEXT:    sub z0.b, z0.b, z1.b
+; CHECK-NEXT:    abs z0.b, p2/m, z0.b
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+  %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+  %sub = sub <vscale x 16 x i8> %a.sext, %b.sext
+  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+  ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
+; CHECK-LABEL: sabd_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    sabd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+  %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+  %sub = sub <vscale x 8 x i32> %a.sext, %b.sext
+  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
+; CHECK-LABEL: sabd_h_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT:    sub z0.h, z0.h, z1.h
+; CHECK-NEXT:    abs z0.h, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+; CHECK-LABEL: sabd_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sabd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+  %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+  %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
+  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
+; CHECK-LABEL: sabd_s_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
+; CHECK-NEXT:    sub z0.s, z0.s, z1.s
+; CHECK-NEXT:    abs z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+  %sub = sub <vscale x 4 x i32> %a.sext, %b.sext
+  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+  ret <vscale x 4 x i32> %abs
+}
+
+define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
+; CHECK-LABEL: sabd_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sabd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+  %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+  %sub = sub <vscale x 2 x i128> %a.sext, %b.sext
+  %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+  %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %trunc
+}
+
+define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
+; CHECK-LABEL: sabd_d_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT:    sub z0.d, z0.d, z1.d
+; CHECK-NEXT:    abs z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+  %sub = sub <vscale x 2 x i64> %a.sext, %b.sext
+  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+  ret <vscale x 2 x i64> %abs
+}
+
+;
+; UABD
+;
+
+define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+; CHECK-LABEL: uabd_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    uabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+  %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+  %sub = sub <vscale x 16 x i16> %a.zext, %b.zext
+  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
+; CHECK-LABEL: uabd_b_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ptrue p2.b
+; CHECK-NEXT:    add z0.b, z0.b, z1.b
+; CHECK-NEXT:    abs z0.b, p2/m, z0.b
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+  %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+  %sub = sub <vscale x 16 x i8> %a.zext, %b.zext
+  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+  ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
+; CHECK-LABEL: uabd_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    uabd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+  %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
+; CHECK-LABEL: uabd_h_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.h, z0.h, #0xff
+; CHECK-NEXT:    and z1.h, z1.h, #0xff
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    sub z0.h, z0.h, z1.h
+; CHECK-NEXT:    abs z0.h, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+  ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+; CHECK-LABEL: uabd_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    uabd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+  %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+  %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
+  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
+; CHECK-LABEL: uabd_s_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.s, z0.s, #0xffff
+; CHECK-NEXT:    and z1.s, z1.s, #0xffff
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sub z0.s, z0.s, z1.s
+; CHECK-NEXT:    abs z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+  %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+  ret <vscale x 4 x i32> %abs
+}
+
+define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
+; CHECK-LABEL: uabd_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uabd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+  %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+  %sub = sub <vscale x 2 x i128> %a.zext, %b.zext
+  %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+  %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %trunc
+}
+
+define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
+; CHECK-LABEL: uabd_d_promoted_ops:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
+; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sub z0.d, z0.d, z1.d
+; CHECK-NEXT:    abs z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+  %sub = sub <vscale x 2 x i64> %a.zext, %b.zext
+  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+  ret <vscale x 2 x i64> %abs
+}
+
+declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
+
+declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
+
+declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
+
+declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
+
+declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
+
+attributes #0 = { "target-features"="+neon,+sve" }
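
As the NOTE at the top of the file says, the CHECK lines were generated with
utils/update_llc_test_checks.py. After a codegen change they can typically be
regenerated with something like the following (the invocation is illustrative;
consult the script's --help for the supported options):

  llvm/utils/update_llc_test_checks.py --llc-binary <build>/bin/llc \
      llvm/test/CodeGen/AArch64/sve-abd.ll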

