[llvm] 673da8c - [LLVM][CodeGen][AArch64] Remove bogus lowering of sve_while{gt/ge/hi/hs} intrinsics. (#88126)

via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 15 04:51:10 PDT 2024


Author: Paul Walker
Date: 2024-04-15T12:51:06+01:00
New Revision: 673da8c83413c4d1e7e76e1b52a2924e837e7221

URL: https://github.com/llvm/llvm-project/commit/673da8c83413c4d1e7e76e1b52a2924e837e7221
DIFF: https://github.com/llvm/llvm-project/commit/673da8c83413c4d1e7e76e1b52a2924e837e7221.diff

LOG: [LLVM][CodeGen][AArch64] Remove bogus lowering of sve_while{gt/ge/hi/hs} intrinsics. (#88126)

When fed constant operands, we try to lower WHILE intrinsics to PTRUE
using a fixed-length pattern. This is not valid for the decrementing
variants of WHILE because they construct their result predicate vector
by traversing from high->low lanes, whereas the incrementing variants and
PTRUE traverse from low->high lanes.

Whilst we could still utilise PTRUE by reversing its result, I figure
replacing a single WHILE with multiple SVE instructions is likely
counterproductive.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve2-intrinsics-while.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 80181a77c9d238..27dc79f3f8d0c3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5035,8 +5035,8 @@ static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
                      DAG.getTargetConstant(Pattern, DL, MVT::i32));
 }
 
-static SDValue optimizeWhile(SDValue Op, SelectionDAG &DAG, bool IsSigned,
-                             bool IsLess, bool IsEqual) {
+static SDValue optimizeIncrementingWhile(SDValue Op, SelectionDAG &DAG,
+                                         bool IsSigned, bool IsEqual) {
   if (!isa<ConstantSDNode>(Op.getOperand(1)) ||
       !isa<ConstantSDNode>(Op.getOperand(2)))
     return SDValue();
@@ -5044,12 +5044,9 @@ static SDValue optimizeWhile(SDValue Op, SelectionDAG &DAG, bool IsSigned,
   SDLoc dl(Op);
   APInt X = Op.getConstantOperandAPInt(1);
   APInt Y = Op.getConstantOperandAPInt(2);
-  APInt NumActiveElems;
   bool Overflow;
-  if (IsLess)
-    NumActiveElems = IsSigned ? Y.ssub_ov(X, Overflow) : Y.usub_ov(X, Overflow);
-  else
-    NumActiveElems = IsSigned ? X.ssub_ov(Y, Overflow) : X.usub_ov(Y, Overflow);
+  APInt NumActiveElems =
+      IsSigned ? Y.ssub_ov(X, Overflow) : Y.usub_ov(X, Overflow);
 
   if (Overflow)
     return SDValue();
@@ -5396,29 +5393,17 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return SDValue();
   }
   case Intrinsic::aarch64_sve_whilelo:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/true,
-                         /*IsEqual=*/false);
+    return optimizeIncrementingWhile(Op, DAG, /*IsSigned=*/false,
+                                     /*IsEqual=*/false);
   case Intrinsic::aarch64_sve_whilelt:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/true,
-                         /*IsEqual=*/false);
+    return optimizeIncrementingWhile(Op, DAG, /*IsSigned=*/true,
+                                     /*IsEqual=*/false);
   case Intrinsic::aarch64_sve_whilels:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/true,
-                         /*IsEqual=*/true);
+    return optimizeIncrementingWhile(Op, DAG, /*IsSigned=*/false,
+                                     /*IsEqual=*/true);
   case Intrinsic::aarch64_sve_whilele:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/true,
-                         /*IsEqual=*/true);
-  case Intrinsic::aarch64_sve_whilege:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/false,
-                         /*IsEqual=*/true);
-  case Intrinsic::aarch64_sve_whilegt:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/true, /*IsLess=*/false,
-                         /*IsEqual=*/false);
-  case Intrinsic::aarch64_sve_whilehs:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/false,
-                         /*IsEqual=*/true);
-  case Intrinsic::aarch64_sve_whilehi:
-    return optimizeWhile(Op, DAG, /*IsSigned=*/false, /*IsLess=*/false,
-                         /*IsEqual=*/false);
+    return optimizeIncrementingWhile(Op, DAG, /*IsSigned=*/true,
+                                     /*IsEqual=*/true);
   case Intrinsic::aarch64_sve_sunpkhi:
     return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
                        Op.getOperand(1));

diff  --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-while.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-while.ll
index 9b82d79cfa9625..2052ebf4c8bb03 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-while.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-while.ll
@@ -78,68 +78,16 @@ define <vscale x 2 x i1> @whilege_d_xx(i64 %a, i64 %b) {
   ret <vscale x 2 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilege_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
-; CHECK-LABEL: whilege_d_ii_dont_fold_to_ptrue_larger_than_minvec:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
-; CHECK-NEXT:    whilege p0.d, x8, xzr
-; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 3, i64 0)
-  ret <vscale x 2 x i1> %out
-}
-
+; Ensure we don't convert constant decrementing while instructions to ptrue.
 define <vscale x 16 x i1> @whilege_b_ii() {
 ; CHECK-LABEL: whilege_b_ii:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.b, vl6
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 3, i32 -2)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
-; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
-; CHECK-NEXT:    whilege p0.b, x8, xzr
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 9, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilege_b_ii_vl_maximum() vscale_range(16, 16) {
-; CHECK-LABEL: whilege_b_ii_vl_maximum:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b, vl256
-; CHECK-NEXT:    ret
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 255, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_overflow() {
-; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_overflow:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    mov w9, #2147483647
-; CHECK-NEXT:    movk w8, #32768, lsl #16
+; CHECK-NEXT:    mov w8, #-2 // =0xfffffffe
+; CHECK-NEXT:    mov w9, #3 // =0x3
 ; CHECK-NEXT:    whilege p0.b, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 2147483647, i32 -2147483646)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_increment_overflow() {
-; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_increment_overflow:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #2147483647
-; CHECK-NEXT:    mov w9, #-2147483641
-; CHECK-NEXT:    whilege p0.b, w9, w8
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 -2147483641, i32 2147483647)
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 3, i32 -2)
   ret <vscale x 16 x i1> %out
 }
 
@@ -219,69 +167,19 @@ define <vscale x 2 x i1> @whilehs_d_xx(i64 %a, i64 %b) {
   ret <vscale x 2 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilehs_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
-; CHECK-LABEL: whilehs_d_ii_dont_fold_to_ptrue_larger_than_minvec:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
-; CHECK-NEXT:    whilehs p0.d, x8, xzr
-; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 3, i64 0)
-  ret <vscale x 2 x i1> %out
-}
-
+; Ensure we don't convert constant decrementing while instructions to ptrue.
 define <vscale x 16 x i1> @whilehs_b_ii() {
 ; CHECK-LABEL: whilehs_b_ii:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.b, vl7
+; CHECK-NEXT:    mov w8, #2 // =0x2
+; CHECK-NEXT:    mov w9, #8 // =0x8
+; CHECK-NEXT:    whilehs p0.b, x9, x8
 ; CHECK-NEXT:    ret
 entry:
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 8, i64 2)
   ret <vscale x 16 x i1> %out
 }
 
-define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
-; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
-; CHECK-NEXT:    whilehs p0.b, x8, xzr
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 9, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilehs_b_ii_vl_maximum() vscale_range(16, 16) {
-; CHECK-LABEL: whilehs_b_ii_vl_maximum:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b, vl256
-; CHECK-NEXT:    ret
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 255, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_overflow() {
-; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_overflow:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #-1
-; CHECK-NEXT:    mov w9, #6
-; CHECK-NEXT:    whilehs p0.b, w9, w8
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 6, i32 4294967295)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_increment_overflow() {
-; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_increment_overflow:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #-1
-; CHECK-NEXT:    whilehs p0.b, w8, wzr
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 4294967295, i32 0)
-  ret <vscale x 16 x i1> %out
-}
-
 ;
 ; WHILEGT
 ;
@@ -358,55 +256,16 @@ define <vscale x 2 x i1> @whilegt_d_xx(i64 %a, i64 %b) {
   ret <vscale x 2 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilegt_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
-; CHECK-LABEL: whilegt_d_ii_dont_fold_to_ptrue_larger_than_minvec:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
-; CHECK-NEXT:    whilegt p0.d, x8, xzr
-; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 3, i64 0)
-  ret <vscale x 2 x i1> %out
-}
-
+; Ensure we don't convert constant decrementing while instructions to ptrue.
 define <vscale x 16 x i1> @whilegt_b_ii() {
 ; CHECK-LABEL: whilegt_b_ii:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.b, vl5
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 3, i32 -2)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilegt_b_ii_fold_to_ptrue_nonexistent_vl9() {
-; CHECK-LABEL: whilegt_b_ii_fold_to_ptrue_nonexistent_vl9:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
-; CHECK-NEXT:    whilegt p0.b, x8, xzr
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 9, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilegt_b_ii_vl_maximum() vscale_range(16, 16) {
-; CHECK-LABEL: whilegt_b_ii_vl_maximum:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b, vl256
-; CHECK-NEXT:    ret
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 256, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilegt_b_ii_dont_fold_to_ptrue_overflow() {
-; CHECK-LABEL: whilegt_b_ii_dont_fold_to_ptrue_overflow:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #2147483647
-; CHECK-NEXT:    mov w9, #-2147483641
+; CHECK-NEXT:    mov w8, #-2 // =0xfffffffe
+; CHECK-NEXT:    mov w9, #3 // =0x3
 ; CHECK-NEXT:    whilegt p0.b, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 -2147483641, i32 2147483647)
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 3, i32 -2)
   ret <vscale x 16 x i1> %out
 }
 
@@ -486,58 +345,19 @@ define <vscale x 2 x i1> @whilehi_d_xx(i64 %a, i64 %b) {
   ret <vscale x 2 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilehi_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
-; CHECK-LABEL: whilehi_d_ii_dont_fold_to_ptrue_larger_than_minvec:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
-; CHECK-NEXT:    whilehi p0.d, x8, xzr
-; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 3, i64 0)
-  ret <vscale x 2 x i1> %out
-}
-
+; Ensure we don't convert constant decrementing while instructions to ptrue.
 define <vscale x 16 x i1> @whilehi_b_ii() {
 ; CHECK-LABEL: whilehi_b_ii:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.b, vl6
+; CHECK-NEXT:    mov w8, #2 // =0x2
+; CHECK-NEXT:    mov w9, #8 // =0x8
+; CHECK-NEXT:    whilehi p0.b, x9, x8
 ; CHECK-NEXT:    ret
 entry:
   %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 8, i64 2)
   ret <vscale x 16 x i1> %out
 }
 
-define <vscale x 16 x i1> @whilehi_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
-; CHECK-LABEL: whilehi_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
-; CHECK-NEXT:    whilehi p0.b, x8, xzr
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 9, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilehi_b_ii_vl_maximum() vscale_range(16, 16) {
-; CHECK-LABEL: whilehi_b_ii_vl_maximum:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.b, vl256
-; CHECK-NEXT:    ret
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 256, i64 0)
-  ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @whilelhi_b_ii_dont_fold_to_ptrue_overflow() {
-; CHECK-LABEL: whilelhi_b_ii_dont_fold_to_ptrue_overflow:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #-1
-; CHECK-NEXT:    mov w9, #7
-; CHECK-NEXT:    whilehi p0.b, w9, w8
-; CHECK-NEXT:    ret
-entry:
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 7, i32 4294967295)
-  ret <vscale x 16 x i1> %out
-}
-
 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32, i32)
 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64, i64)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32, i32)


        


More information about the llvm-commits mailing list