[llvm] 2dda529 - [AArch64] Fix Fold of Compare with Right-shifted Value (#127209)

via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 17 09:44:12 PST 2025


Author: Marius Kamp
Date: 2025-02-17T17:44:08Z
New Revision: 2dda529838e622e7a79b1e26d2899f319fd7e379

URL: https://github.com/llvm/llvm-project/commit/2dda529838e622e7a79b1e26d2899f319fd7e379
DIFF: https://github.com/llvm/llvm-project/commit/2dda529838e622e7a79b1e26d2899f319fd7e379.diff

LOG: [AArch64] Fix Fold of Compare with Right-shifted Value (#127209)

This change folds (setcc ne (lshr x c) 0) for 64-bit types and constants
c >= 32. This fold already existed for other types or smaller constants
but was not applicable to 64-bit types and constants >= 32 due to a
comparison of the constant c with the bit size of the setcc operation.
The type of this operation is legalized to i32, which does not
necessarily match the type of the lshr operation. Use the bit size of
the type of the lshr operation instead for the comparison.
    
Fixes #122380.

Added: 
    llvm/test/CodeGen/AArch64/shift-const-ne-0.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4263be1098899..8f849af6f4d35 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -25070,10 +25070,10 @@ static SDValue performSETCCCombine(SDNode *N,
   // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne
   if (Cond == ISD::SETNE && isNullConstant(RHS) &&
       LHS->getOpcode() == ISD::SRL && isa<ConstantSDNode>(LHS->getOperand(1)) &&
-      LHS->getConstantOperandVal(1) < VT.getScalarSizeInBits() &&
       LHS->hasOneUse()) {
     EVT TstVT = LHS->getValueType(0);
-    if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64) {
+    if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64 &&
+        LHS->getConstantOperandVal(1) < TstVT.getFixedSizeInBits()) {
       // this pattern will get better opt in emitComparison
       uint64_t TstImm = -1ULL << LHS->getConstantOperandVal(1);
       SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0),

diff --git a/llvm/test/CodeGen/AArch64/shift-const-ne-0.ll b/llvm/test/CodeGen/AArch64/shift-const-ne-0.ll
new file mode 100644
index 0000000000000..be064d591613c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/shift-const-ne-0.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=aarch64-unknown-unknown < %s -o -| FileCheck %s
+
+define i1 @lsr_1_ne_0_16(i16 %x) {
+; CHECK-LABEL: lsr_1_ne_0_16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0xfffe
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i16 %x, 1
+  %cmp = icmp ne i16 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_1_ne_0_32(i32 %x) {
+; CHECK-LABEL: lsr_1_ne_0_32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0xfffffffe
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i32 %x, 1
+  %cmp = icmp ne i32 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_30_ne_0_32(i32 %x) {
+; CHECK-LABEL: lsr_30_ne_0_32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0xc0000000
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i32 %x, 30
+  %cmp = icmp ne i32 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_31_ne_0_32(i32 %x) {
+; CHECK-LABEL: lsr_31_ne_0_32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr w0, w0, #31
+; CHECK-NEXT:    ret
+  %shr = lshr i32 %x, 31
+  %cmp = icmp ne i32 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_1_ne_0_64(i64 %x) {
+; CHECK-LABEL: lsr_1_ne_0_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst x0, #0xfffffffffffffffe
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i64 %x, 1
+  %cmp = icmp ne i64 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_31_ne_0_64(i64 %x) {
+; CHECK-LABEL: lsr_31_ne_0_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst x0, #0xffffffff80000000
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i64 %x, 31
+  %cmp = icmp ne i64 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_32_ne_0_64(i64 %x) {
+; CHECK-LABEL: lsr_32_ne_0_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst x0, #0xffffffff00000000
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i64 %x, 32
+  %cmp = icmp ne i64 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_33_ne_0_64(i64 %x) {
+; CHECK-LABEL: lsr_33_ne_0_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst x0, #0xfffffffe00000000
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i64 %x, 33
+  %cmp = icmp ne i64 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_62_ne_0_64(i64 %x) {
+; CHECK-LABEL: lsr_62_ne_0_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst x0, #0xc000000000000000
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %shr = lshr i64 %x, 62
+  %cmp = icmp ne i64 %shr, 0
+  ret i1 %cmp
+}
+
+define i1 @lsr_63_ne_0_64(i64 %x) {
+; CHECK-LABEL: lsr_63_ne_0_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr x0, x0, #63
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %shr = lshr i64 %x, 63
+  %cmp = icmp ne i64 %shr, 0
+  ret i1 %cmp
+}
+
+define <4 x i1> @lsr_1_ne_0_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: lsr_1_ne_0_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushr v0.4h, v0.4h, #1
+; CHECK-NEXT:    cmtst v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %shr = lshr <4 x i16> %x, <i16 1, i16 1, i16 1, i16 1>
+  %cmp = icmp ne <4 x i16> %shr, <i16 0, i16 0, i16 0, i16 0>
+  ret <4 x i1> %cmp
+}


        


More information about the llvm-commits mailing list