[llvm] [ARM] Only change mask if demanded bits says we can optimize (PR #165106)
    via llvm-commits 
    llvm-commits at lists.llvm.org
    Sat Oct 25 17:51:25 PDT 2025

https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/165106
>From f812585ecfc0a13f0643d351f7bf056a81d8e3bd Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sat, 25 Oct 2025 12:27:59 -0400
Subject: [PATCH 1/2] [ARM] Only change mask if demanded bits says we can
 optimize
---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       | 44 ++++++++++----
 llvm/test/CodeGen/ARM/funnel-shift-rot.ll     |  5 +-
 ...st-and-by-const-from-lshr-in-eqcmp-zero.ll | 57 ++++++++++++-------
 llvm/test/CodeGen/ARM/va_arg.ll               |  2 +-
 llvm/test/CodeGen/Thumb/branch-to-return.ll   |  2 +-
 llvm/test/CodeGen/Thumb2/active_lane_mask.ll  |  2 +-
 6 files changed, 74 insertions(+), 38 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 313ae3d68fb83..2ca81332ea7c9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -20138,6 +20138,17 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
   }
 }
 
+static bool isLegalLogicalImmediate(unsigned Imm,
+                                    const ARMSubtarget *Subtarget) {
+  // ARM mode: an 8-bit value rotated right by an even number of bits.
+  if (!Subtarget->isThumb())
+    return ARM_AM::getSOImmVal(Imm) != -1;
+  if (Subtarget->isThumb2())
+    return ARM_AM::getT2SOImmVal(Imm) != -1;
+  // Thumb1 only has an 8-bit unsigned immediate.
+  return Imm <= 255;
+}
+
 bool ARMTargetLowering::targetShrinkDemandedConstant(
     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
     TargetLoweringOpt &TLO) const {
@@ -20179,7 +20190,6 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
   // loop in obscure cases.)
   if (ExpandedMask == ~0U)
     return TLO.CombineTo(Op, Op.getOperand(0));
-
   auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
     return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
   };
@@ -20192,22 +20202,33 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
     return TLO.CombineTo(Op, NewOp);
   };
 
-  // Prefer uxtb mask.
+  // In Thumb1, check the 0xFF (uxtb) and 0xFF00 masks first.
+  if (Subtarget->isThumb1Only()) {
+    if (IsLegalMask(0xFF))
+      return UseMask(0xFF);
+
+    if (IsLegalMask(0xFF00))
+      return UseMask(0xFF00);
+  }
+
+  // Don't optimize if the mask is already a legal immediate.
+  if (isLegalLogicalImmediate(Mask, Subtarget))
+    return false;
+
+  // If the inverted mask is legal, a BIC already covers it; don't optimize.
+  if (isLegalLogicalImmediate(~Mask, Subtarget))
+    return false;
+
   if (IsLegalMask(0xFF))
     return UseMask(0xFF);
 
-  // Prefer uxth mask.
-  if (IsLegalMask(0xFFFF))
-    return UseMask(0xFFFF);
+  if (IsLegalMask(0xFF00))
+    return UseMask(0xFF00);
 
-  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
-  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
-  if (ShrunkMask < 256)
+  if (isLegalLogicalImmediate(ShrunkMask, Subtarget))
     return UseMask(ShrunkMask);
 
-  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
-  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
-  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
+  if (isLegalLogicalImmediate(~ExpandedMask, Subtarget))
     return UseMask(ExpandedMask);
 
   // Potential improvements:
@@ -20215,7 +20236,6 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
   // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
   // We could try to prefer Thumb1 immediates which can be lowered to a
   // two-instruction sequence.
-  // We could try to recognize more legal ARM/Thumb2 immediates here.
 
   return false;
 }
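
For context on the hook being changed here: IsLegalMask (visible in the hunk
above) accepts a candidate constant exactly when it lies between ShrunkMask
and ExpandedMask as bit sets; it must keep every demanded one-bit of the
original mask and may only add bits that are not demanded. Below is a minimal
standalone sketch of that invariant, with made-up inputs rather than the LLVM
API, assuming ShrunkMask and ExpandedMask are Mask & DemandedBits and
Mask | ~DemandedBits as in the surrounding code:

#include <cassert>
#include <cstdint>

// A candidate AND mask may replace the original iff it preserves the
// result on every demanded bit.
static bool isLegalMask(uint32_t Candidate, uint32_t Mask, uint32_t Demanded) {
  uint32_t ShrunkMask = Mask & Demanded;    // one-bits we must keep
  uint32_t ExpandedMask = Mask | ~Demanded; // one-bits we may add for free
  return (ShrunkMask & Candidate) == ShrunkMask &&
         (~ExpandedMask & Candidate) == 0;
}

int main() {
  // With demanded bits 0xF00F, an AND with 0x30F can be narrowed to the
  // uxtb-style mask 0xFF: bits 8-9 of the original mask are never observed,
  // and every demanded bit it clears is also cleared by 0xFF.
  assert(isLegalMask(0xFF, /*Mask=*/0x30F, /*Demanded=*/0xF00F));
  // Not legal if the original mask clears a demanded bit (bit 0 here)
  // that the candidate would keep.
  assert(!isLegalMask(0xFF, /*Mask=*/0x10E, /*Demanded=*/0x000F));
  return 0;
}
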
diff --git a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
index a1b6847d623d0..6f34a5fd00314 100644
--- a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
@@ -19,7 +19,7 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
 define i8 @rotl_i8_const_shift(i8 %x) {
 ; CHECK-LABEL: rotl_i8_const_shift:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    uxtb r1, r0
+; CHECK-NEXT:    and r1, r0, #224
 ; CHECK-NEXT:    lsl r0, r0, #3
 ; CHECK-NEXT:    orr r0, r0, r1, lsr #5
 ; CHECK-NEXT:    bx lr
@@ -161,8 +161,7 @@ define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
 define i8 @rotr_i8_const_shift(i8 %x) {
 ; CHECK-LABEL: rotr_i8_const_shift:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    uxtb r1, r0
-; CHECK-NEXT:    lsr r1, r1, #3
+; CHECK-NEXT:    ubfx r1, r0, #3, #5
 ; CHECK-NEXT:    orr r0, r1, r0, lsl #5
 ; CHECK-NEXT:    bx lr
   %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
diff --git a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 7cc623fb0a616..36feb8c4f1f4b 100644
--- a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -21,9 +21,9 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; ARM-LABEL: scalar_i8_signbit_eq:
 ; ARM:       @ %bb.0:
 ; ARM-NEXT:    uxtb r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
+; ARM-NEXT:    mov r2, #128
+; ARM-NEXT:    and r0, r2, r0, lsl r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxtb r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #7
 ; ARM-NEXT:    bx lr
 ;
@@ -42,7 +42,7 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; THUMB78-NEXT:    uxtb r1, r1
 ; THUMB78-NEXT:    lsls r0, r1
 ; THUMB78-NEXT:    movs r1, #1
-; THUMB78-NEXT:    uxtb r0, r0
+; THUMB78-NEXT:    and r0, r0, #128
 ; THUMB78-NEXT:    eor.w r0, r1, r0, lsr #7
 ; THUMB78-NEXT:    bx lr
   %t0 = lshr i8 128, %y
@@ -122,9 +122,9 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; ARM-LABEL: scalar_i16_signbit_eq:
 ; ARM:       @ %bb.0:
 ; ARM-NEXT:    uxth r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
+; ARM-NEXT:    mov r2, #32768
+; ARM-NEXT:    and r0, r2, r0, lsl r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxth r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #15
 ; ARM-NEXT:    bx lr
 ;
@@ -144,7 +144,7 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; THUMB78-NEXT:    uxth r1, r1
 ; THUMB78-NEXT:    lsls r0, r1
 ; THUMB78-NEXT:    movs r1, #1
-; THUMB78-NEXT:    uxth r0, r0
+; THUMB78-NEXT:    and r0, r0, #32768
 ; THUMB78-NEXT:    eor.w r0, r1, r0, lsr #15
 ; THUMB78-NEXT:    bx lr
   %t0 = lshr i16 32768, %y
@@ -862,21 +862,36 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
 ;------------------------------------------------------------------------------;
 
 define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; ARM-LABEL: scalar_i8_signbit_ne:
-; ARM:       @ %bb.0:
-; ARM-NEXT:    uxtb r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
-; ARM-NEXT:    uxtb r0, r0
-; ARM-NEXT:    lsr r0, r0, #7
-; ARM-NEXT:    bx lr
+; ARM6-LABEL: scalar_i8_signbit_ne:
+; ARM6:       @ %bb.0:
+; ARM6-NEXT:    uxtb r1, r1
+; ARM6-NEXT:    mov r2, #128
+; ARM6-NEXT:    and r0, r2, r0, lsl r1
+; ARM6-NEXT:    lsr r0, r0, #7
+; ARM6-NEXT:    bx lr
 ;
-; THUMB-LABEL: scalar_i8_signbit_ne:
-; THUMB:       @ %bb.0:
-; THUMB-NEXT:    uxtb r1, r1
-; THUMB-NEXT:    lsls r0, r1
-; THUMB-NEXT:    uxtb r0, r0
-; THUMB-NEXT:    lsrs r0, r0, #7
-; THUMB-NEXT:    bx lr
+; ARM78-LABEL: scalar_i8_signbit_ne:
+; ARM78:       @ %bb.0:
+; ARM78-NEXT:    uxtb r1, r1
+; ARM78-NEXT:    lsl r0, r0, r1
+; ARM78-NEXT:    ubfx r0, r0, #7, #1
+; ARM78-NEXT:    bx lr
+;
+; THUMB6-LABEL: scalar_i8_signbit_ne:
+; THUMB6:       @ %bb.0:
+; THUMB6-NEXT:    uxtb r1, r1
+; THUMB6-NEXT:    lsls r0, r1
+; THUMB6-NEXT:    movs r1, #128
+; THUMB6-NEXT:    ands r1, r0
+; THUMB6-NEXT:    lsrs r0, r1, #7
+; THUMB6-NEXT:    bx lr
+;
+; THUMB78-LABEL: scalar_i8_signbit_ne:
+; THUMB78:       @ %bb.0:
+; THUMB78-NEXT:    uxtb r1, r1
+; THUMB78-NEXT:    lsls r0, r1
+; THUMB78-NEXT:    ubfx r0, r0, #7, #1
+; THUMB78-NEXT:    bx lr
   %t0 = lshr i8 128, %y
   %t1 = and i8 %t0, %x
   %res = icmp ne i8 %t1, 0 ;  we are perfectly happy with 'ne' predicate
@@ -1051,3 +1066,5 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
   %res = icmp eq i8 %t1, 1 ; should be comparing with 0
   ret i1 %res
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; THUMB: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/va_arg.ll b/llvm/test/CodeGen/ARM/va_arg.ll
index 41556b9fa2dec..7228b82b19c5a 100644
--- a/llvm/test/CodeGen/ARM/va_arg.ll
+++ b/llvm/test/CodeGen/ARM/va_arg.ll
@@ -35,7 +35,7 @@ define double @test2(i32 %a, ptr %b, ...) nounwind optsize {
 ; CHECK-NEXT:    add r0, sp, #4
 ; CHECK-NEXT:    stmib sp, {r2, r3}
 ; CHECK-NEXT:    add r0, r0, #11
-; CHECK-NEXT:    bic r0, r0, #3
+; CHECK-NEXT:    bic r0, r0, #7
 ; CHECK-NEXT:    str r2, [r1]
 ; CHECK-NEXT:    add r1, r0, #8
 ; CHECK-NEXT:    str r1, [sp]
diff --git a/llvm/test/CodeGen/Thumb/branch-to-return.ll b/llvm/test/CodeGen/Thumb/branch-to-return.ll
index 11e8add6f215b..b190a11aeab17 100644
--- a/llvm/test/CodeGen/Thumb/branch-to-return.ll
+++ b/llvm/test/CodeGen/Thumb/branch-to-return.ll
@@ -26,7 +26,7 @@ define i32 @foo(ptr %x, i32 %n) {
 ; CHECK-NEXT:    ldr.w r0, [r12]
 ; CHECK-NEXT:  .LBB0_6: @ %for.body.preheader1
 ; CHECK-NEXT:    subs r3, r1, r3
-; CHECK-NEXT:    mvn r2, #12
+; CHECK-NEXT:    mvn r2, #15
 ; CHECK-NEXT:    and.w r1, r2, r1, lsl #2
 ; CHECK-NEXT:    add r1, r12
 ; CHECK-NEXT:  .LBB0_7: @ %for.body
diff --git a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
index bcd92f81911b2..b75f1ff742bee 100644
--- a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
+++ b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
@@ -283,7 +283,7 @@ define void @test_width2(ptr nocapture readnone %x, ptr nocapture %y, i8 zeroext
 ; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
 ; CHECK-NEXT:    adds r0, r2, #1
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    and r0, r0, #510
 ; CHECK-NEXT:    subs r0, #2
 ; CHECK-NEXT:    add.w r0, r3, r0, lsr #1
 ; CHECK-NEXT:    dls lr, r0
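
The test deltas above follow from which constants each mode can encode
directly. As background, here is a simplified standalone model of the
encodings that ARM_AM::getSOImmVal and ARM_AM::getT2SOImmVal test for;
a sketch of the architecture rules, not the LLVM implementation:

#include <cassert>
#include <cstdint>

// Rotate left, avoiding the undefined shift-by-32.
static uint32_t rotl32(uint32_t V, unsigned N) {
  N &= 31;
  return N ? (V << N) | (V >> (32 - N)) : V;
}

// ARM mode so_imm: an 8-bit value rotated right by an even amount.
static bool isARMSOImm(uint32_t V) {
  for (unsigned Rot = 0; Rot < 32; Rot += 2)
    if (rotl32(V, Rot) <= 0xFF) // rotl by Rot undoes a rotr by Rot
      return true;
  return false;
}

// Thumb2 modified immediate: a plain 8-bit value, one of three
// byte-splat patterns, or an 8-bit value with its top bit set rotated
// right by 8..31.
static bool isThumb2SOImm(uint32_t V) {
  uint32_t B = V & 0xFF, H = (V >> 8) & 0xFF;
  if (V <= 0xFF || V == B * 0x00010001u || V == H * 0x01000100u ||
      V == B * 0x01010101u)
    return true;
  for (unsigned Rot = 8; Rot < 32; ++Rot) {
    uint32_t R = rotl32(V, Rot);
    if (R >= 0x80 && R <= 0xFF)
      return true;
  }
  return false;
}

int main() {
  assert(isARMSOImm(0xFF00));    // 0xFF rotated by an even amount
  assert(!isARMSOImm(0x1FE));    // needs an odd rotation, so not ARM-encodable
  assert(isThumb2SOImm(0x1FE));  // hence the Thumb2 `and r0, r0, #510` above
  assert(isThumb2SOImm(0x00FF00FF)); // 0x00XY00XY splat pattern
  return 0;
}
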
>From 361381a134d232e3e4484ab641ca2d7ee6d16c16 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sat, 25 Oct 2025 20:50:59 -0400
Subject: [PATCH 2/2] Update hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
---
 .../ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll        | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 36feb8c4f1f4b..a21ac8944d7ad 100644
--- a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -881,9 +881,8 @@ define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
 ; THUMB6:       @ %bb.0:
 ; THUMB6-NEXT:    uxtb r1, r1
 ; THUMB6-NEXT:    lsls r0, r1
-; THUMB6-NEXT:    movs r1, #128
-; THUMB6-NEXT:    ands r1, r0
-; THUMB6-NEXT:    lsrs r0, r1, #7
+; THUMB6-NEXT:    uxtb r0, r0
+; THUMB6-NEXT:    lsrs r0, r0, #7
 ; THUMB6-NEXT:    bx lr
 ;
 ; THUMB78-LABEL: scalar_i8_signbit_ne:
    
    