[llvm] [ARM] Only change mask if demanded bits says we can optimize (PR #165106)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 26 17:06:37 PDT 2025

https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/165106
From c8a261604ca64d5a9c41ad5d04d3881efb0157f5 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sat, 25 Oct 2025 12:27:59 -0400
Subject: [PATCH 1/2] [ARM] Only change mask if demanded bits says we can
 optimize

Also add a switch (arm-enable-logical-imm) to turn the logical immediate
optimization off.
---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       | 100 +++++++++++++-----
 llvm/test/CodeGen/ARM/funnel-shift-rot.ll     |   5 +-
 ...st-and-by-const-from-lshr-in-eqcmp-zero.ll |  56 ++++++----
 llvm/test/CodeGen/ARM/va_arg.ll               |   2 +-
 llvm/test/CodeGen/Thumb/bic_imm.ll            |   6 +-
 llvm/test/CodeGen/Thumb/branch-to-return.ll   |   2 +-
 llvm/test/CodeGen/Thumb2/active_lane_mask.ll  |   2 +-
 llvm/test/CodeGen/Thumb2/bf16-instructions.ll |  38 ++++---
 .../Thumb2/mve-tailpred-nonzerostart.ll       |   2 +-
 llvm/test/CodeGen/Thumb2/shift_parts.ll       |   4 +-
 llvm/test/CodeGen/Thumb2/thumb2-rev16.ll      |   3 +-
 .../Thumb2/urem-seteq-illegal-types.ll        |   3 +-
 12 files changed, 143 insertions(+), 80 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 313ae3d68fb83..03e15d0db0c3e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -118,6 +118,7 @@ using namespace llvm;
 #define DEBUG_TYPE "arm-isel"
 
 STATISTIC(NumTailCalls, "Number of tail calls");
+STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
 STATISTIC(NumConstpoolPromoted,
@@ -142,6 +143,12 @@ static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
     cl::desc("Maximum size of ALL constants to promote into a constant pool"),
     cl::init(128));
 
+static cl::opt<bool>
+    EnableOptimizeLogicalImm("arm-enable-logical-imm", cl::Hidden,
+                             cl::desc("Enable ARM logical imm instruction "
+                                      "optimization"),
+                             cl::init(true));
+
 cl::opt<unsigned>
 MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
   cl::desc("Maximum interleave factor for MVE VLDn to generate."),
@@ -20138,6 +20145,16 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
   }
 }
 
+static bool isLegalLogicalImmediate(unsigned Imm,
+                                    const ARMSubtarget *Subtarget) {
+  if (!Subtarget->isThumb())
+    return ARM_AM::getSOImmVal(Imm) != -1;
+  if (Subtarget->isThumb2())
+    return ARM_AM::getT2SOImmVal(Imm) != -1;
+  // Thumb1 only has 8-bit unsigned immediate.
+  return Imm <= 255;
+}
+
 bool ARMTargetLowering::targetShrinkDemandedConstant(
     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
     TargetLoweringOpt &TLO) const {
@@ -20146,8 +20163,7 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
   if (!TLO.LegalOps)
     return false;
 
-  // Only optimize AND for now.
-  if (Op.getOpcode() != ISD::AND)
+  if (!EnableOptimizeLogicalImm)
     return false;
 
   EVT VT = Op.getValueType();
@@ -20158,6 +20174,14 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
 
   assert(VT == MVT::i32 && "Unexpected integer type");
 
+  // Exit early if we demand all bits.
+  if (DemandedBits.popcount() == 32)
+    return false;
+
+  // Only optimize AND for now.
+  if (Op.getOpcode() != ISD::AND)
+    return false;
+
   // Make sure the RHS really is a constant.
   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
   if (!C)
@@ -20165,21 +20189,13 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
 
   unsigned Mask = C->getZExtValue();
 
+  if (Mask == 0 || Mask == ~0U)
+    return false;
+
   unsigned Demanded = DemandedBits.getZExtValue();
   unsigned ShrunkMask = Mask & Demanded;
   unsigned ExpandedMask = Mask | ~Demanded;
 
-  // If the mask is all zeros, let the target-independent code replace the
-  // result with zero.
-  if (ShrunkMask == 0)
-    return false;
-
-  // If the mask is all ones, erase the AND. (Currently, the target-independent
-  // code won't do this, so we have to do it explicitly to avoid an infinite
-  // loop in obscure cases.)
-  if (ExpandedMask == ~0U)
-    return TLO.CombineTo(Op, Op.getOperand(0));
-
   auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
     return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
   };
@@ -20192,30 +20208,66 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
     return TLO.CombineTo(Op, NewOp);
   };
 
-  // Prefer uxtb mask.
-  if (IsLegalMask(0xFF))
+  // If the mask is all zeros, let the target-independent code replace the
+  // result with zero.
+  if (ShrunkMask == 0) {
+    ++NumOptimizedImms;
+    return UseMask(ShrunkMask);
+  }
+
+  // If the mask is all ones, erase the AND. (Currently, the target-independent
+  // code won't do this, so we have to do it explicitly to avoid an infinite
+  // loop in obscure cases.)
+  if (ExpandedMask == ~0U) {
+    ++NumOptimizedImms;
+    return UseMask(ExpandedMask);
+  }
+
+  // If thumb, check for uxth and uxtb masks first and foremost.
+  if (Subtarget->isThumb1Only() && Subtarget->hasV6Ops()) {
+    if (IsLegalMask(0xFF)) {
+      ++NumOptimizedImms;
+      return UseMask(0xFF);
+    }
+
+    if (IsLegalMask(0xFFFF)) {
+      ++NumOptimizedImms;
+      return UseMask(0xFFFF);
+    }
+  }
+
+  // Don't optimize if it is legal already.
+  if (isLegalLogicalImmediate(Mask, Subtarget))
+    return false;
+
+  if (isLegalLogicalImmediate(~Mask, Subtarget))
+    return UseMask(Mask); // FIXME: Returning false causes infinite loop.
+
+  if (IsLegalMask(0xFF)) {
+    ++NumOptimizedImms;
     return UseMask(0xFF);
+  }
 
-  // Prefer uxth mask.
-  if (IsLegalMask(0xFFFF))
+  if (IsLegalMask(0xFFFF)) {
+    ++NumOptimizedImms;
     return UseMask(0xFFFF);
+  }
 
-  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
-  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
-  if (ShrunkMask < 256)
+  if (isLegalLogicalImmediate(ShrunkMask, Subtarget)) {
+    ++NumOptimizedImms;
     return UseMask(ShrunkMask);
+  }
 
-  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
-  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
-  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
+  if (isLegalLogicalImmediate(~ExpandedMask, Subtarget)) {
+    ++NumOptimizedImms;
     return UseMask(ExpandedMask);
+  }
 
   // Potential improvements:
   //
   // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
   // We could try to prefer Thumb1 immediates which can be lowered to a
   // two-instruction sequence.
-  // We could try to recognize more legal ARM/Thumb2 immediates here.
 
   return false;
 }
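
(Aside, not part of the patch: the rewrite above leans on the usual demanded-bits
containment argument. With ShrunkMask = Mask & Demanded and ExpandedMask =
Mask | ~Demanded, any candidate constant that contains ShrunkMask and lies
inside ExpandedMask agrees with Mask on every demanded bit, which is exactly
what the IsLegalMask lambda tests. A minimal standalone C++ sketch of that
invariant; the helper name is made up for illustration only:

    #include <cassert>
    #include <cstdint>

    // Returns true if replacing 'Mask' with 'Candidate' in (X & Mask) cannot
    // change any bit that 'Demanded' cares about.
    static bool isEquivalentOnDemandedBits(uint32_t Mask, uint32_t Demanded,
                                           uint32_t Candidate) {
      uint32_t ShrunkMask = Mask & Demanded;     // demanded bits that must stay set
      uint32_t ExpandedMask = Mask | ~Demanded;  // bits the candidate may also keep
      return (Candidate & ShrunkMask) == ShrunkMask &&
             (Candidate & ~ExpandedMask) == 0;
    }

    int main() {
      // Only the low byte is demanded; both the shrunk mask 0xF0 and the wider
      // 0xFFF0 behave like the original 0x0FF0 on those bits.
      assert(isEquivalentOnDemandedBits(0x0FF0u, 0x00FFu, 0x00F0u));
      assert(isEquivalentOnDemandedBits(0x0FF0u, 0x00FFu, 0xFFF0u));
      // 0xFF is not equivalent: the original mask clears demanded bits 0-3.
      assert(!isEquivalentOnDemandedBits(0x0FF0u, 0x00FFu, 0x00FFu));
      return 0;
    }

The patch only changes which candidates get tried and in what order; the
equivalence check itself is unchanged.)
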
diff --git a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
index a1b6847d623d0..6f34a5fd00314 100644
--- a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
@@ -19,7 +19,7 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
 define i8 @rotl_i8_const_shift(i8 %x) {
 ; CHECK-LABEL: rotl_i8_const_shift:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    uxtb r1, r0
+; CHECK-NEXT:    and r1, r0, #224
 ; CHECK-NEXT:    lsl r0, r0, #3
 ; CHECK-NEXT:    orr r0, r0, r1, lsr #5
 ; CHECK-NEXT:    bx lr
@@ -161,8 +161,7 @@ define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
 define i8 @rotr_i8_const_shift(i8 %x) {
 ; CHECK-LABEL: rotr_i8_const_shift:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    uxtb r1, r0
-; CHECK-NEXT:    lsr r1, r1, #3
+; CHECK-NEXT:    ubfx r1, r0, #3, #5
 ; CHECK-NEXT:    orr r0, r1, r0, lsl #5
 ; CHECK-NEXT:    bx lr
   %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
diff --git a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 7cc623fb0a616..a21ac8944d7ad 100644
--- a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -21,9 +21,9 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; ARM-LABEL: scalar_i8_signbit_eq:
 ; ARM:       @ %bb.0:
 ; ARM-NEXT:    uxtb r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
+; ARM-NEXT:    mov r2, #128
+; ARM-NEXT:    and r0, r2, r0, lsl r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxtb r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #7
 ; ARM-NEXT:    bx lr
 ;
@@ -42,7 +42,7 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; THUMB78-NEXT:    uxtb r1, r1
 ; THUMB78-NEXT:    lsls r0, r1
 ; THUMB78-NEXT:    movs r1, #1
-; THUMB78-NEXT:    uxtb r0, r0
+; THUMB78-NEXT:    and r0, r0, #128
 ; THUMB78-NEXT:    eor.w r0, r1, r0, lsr #7
 ; THUMB78-NEXT:    bx lr
   %t0 = lshr i8 128, %y
@@ -122,9 +122,9 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; ARM-LABEL: scalar_i16_signbit_eq:
 ; ARM:       @ %bb.0:
 ; ARM-NEXT:    uxth r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
+; ARM-NEXT:    mov r2, #32768
+; ARM-NEXT:    and r0, r2, r0, lsl r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxth r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #15
 ; ARM-NEXT:    bx lr
 ;
@@ -144,7 +144,7 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; THUMB78-NEXT:    uxth r1, r1
 ; THUMB78-NEXT:    lsls r0, r1
 ; THUMB78-NEXT:    movs r1, #1
-; THUMB78-NEXT:    uxth r0, r0
+; THUMB78-NEXT:    and r0, r0, #32768
 ; THUMB78-NEXT:    eor.w r0, r1, r0, lsr #15
 ; THUMB78-NEXT:    bx lr
   %t0 = lshr i16 32768, %y
@@ -862,21 +862,35 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
 ;------------------------------------------------------------------------------;
 
 define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; ARM-LABEL: scalar_i8_signbit_ne:
-; ARM:       @ %bb.0:
-; ARM-NEXT:    uxtb r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
-; ARM-NEXT:    uxtb r0, r0
-; ARM-NEXT:    lsr r0, r0, #7
-; ARM-NEXT:    bx lr
+; ARM6-LABEL: scalar_i8_signbit_ne:
+; ARM6:       @ %bb.0:
+; ARM6-NEXT:    uxtb r1, r1
+; ARM6-NEXT:    mov r2, #128
+; ARM6-NEXT:    and r0, r2, r0, lsl r1
+; ARM6-NEXT:    lsr r0, r0, #7
+; ARM6-NEXT:    bx lr
 ;
-; THUMB-LABEL: scalar_i8_signbit_ne:
-; THUMB:       @ %bb.0:
-; THUMB-NEXT:    uxtb r1, r1
-; THUMB-NEXT:    lsls r0, r1
-; THUMB-NEXT:    uxtb r0, r0
-; THUMB-NEXT:    lsrs r0, r0, #7
-; THUMB-NEXT:    bx lr
+; ARM78-LABEL: scalar_i8_signbit_ne:
+; ARM78:       @ %bb.0:
+; ARM78-NEXT:    uxtb r1, r1
+; ARM78-NEXT:    lsl r0, r0, r1
+; ARM78-NEXT:    ubfx r0, r0, #7, #1
+; ARM78-NEXT:    bx lr
+;
+; THUMB6-LABEL: scalar_i8_signbit_ne:
+; THUMB6:       @ %bb.0:
+; THUMB6-NEXT:    uxtb r1, r1
+; THUMB6-NEXT:    lsls r0, r1
+; THUMB6-NEXT:    uxtb r0, r0
+; THUMB6-NEXT:    lsrs r0, r0, #7
+; THUMB6-NEXT:    bx lr
+;
+; THUMB78-LABEL: scalar_i8_signbit_ne:
+; THUMB78:       @ %bb.0:
+; THUMB78-NEXT:    uxtb r1, r1
+; THUMB78-NEXT:    lsls r0, r1
+; THUMB78-NEXT:    ubfx r0, r0, #7, #1
+; THUMB78-NEXT:    bx lr
   %t0 = lshr i8 128, %y
   %t1 = and i8 %t0, %x
   %res = icmp ne i8 %t1, 0 ;  we are perfectly happy with 'ne' predicate
@@ -1051,3 +1065,5 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
   %res = icmp eq i8 %t1, 1 ; should be comparing with 0
   ret i1 %res
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; THUMB: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/va_arg.ll b/llvm/test/CodeGen/ARM/va_arg.ll
index 41556b9fa2dec..7228b82b19c5a 100644
--- a/llvm/test/CodeGen/ARM/va_arg.ll
+++ b/llvm/test/CodeGen/ARM/va_arg.ll
@@ -35,7 +35,7 @@ define double @test2(i32 %a, ptr %b, ...) nounwind optsize {
 ; CHECK-NEXT:    add r0, sp, #4
 ; CHECK-NEXT:    stmib sp, {r2, r3}
 ; CHECK-NEXT:    add r0, r0, #11
-; CHECK-NEXT:    bic r0, r0, #3
+; CHECK-NEXT:    bic r0, r0, #7
 ; CHECK-NEXT:    str r2, [r1]
 ; CHECK-NEXT:    add r1, r0, #8
 ; CHECK-NEXT:    str r1, [sp]
diff --git a/llvm/test/CodeGen/Thumb/bic_imm.ll b/llvm/test/CodeGen/Thumb/bic_imm.ll
index 741b2cf8db2e3..d1730c21250a8 100644
--- a/llvm/test/CodeGen/Thumb/bic_imm.ll
+++ b/llvm/test/CodeGen/Thumb/bic_imm.ll
@@ -82,7 +82,7 @@ define void @truncated_neg256(i16 %a, ptr %p) {
 ;
 ; CHECK-T2-LABEL: truncated_neg256:
 ; CHECK-T2:       @ %bb.0:
-; CHECK-T2-NEXT:    bic r0, r0, #255
+; CHECK-T2-NEXT:    and r0, r0, #65280
 ; CHECK-T2-NEXT:    strh r0, [r1]
 ; CHECK-T2-NEXT:    bx lr
   %and = and i16 %a, -256
@@ -90,7 +90,6 @@ define void @truncated_neg256(i16 %a, ptr %p) {
   ret void
 }
 
-; FIXME: Thumb2 supports "bic r0, r0, #510"
 define void @truncated_neg511(i16 %a, ptr %p) {
 ; CHECK-T1-LABEL: truncated_neg511:
 ; CHECK-T1:       @ %bb.0:
@@ -105,8 +104,7 @@ define void @truncated_neg511(i16 %a, ptr %p) {
 ;
 ; CHECK-T2-LABEL: truncated_neg511:
 ; CHECK-T2:       @ %bb.0:
-; CHECK-T2-NEXT:    movw r2, #65025
-; CHECK-T2-NEXT:    ands r0, r2
+; CHECK-T2-NEXT:    bic r0, r0, #510
 ; CHECK-T2-NEXT:    strh r0, [r1]
 ; CHECK-T2-NEXT:    bx lr
   %and = and i16 %a, -511
diff --git a/llvm/test/CodeGen/Thumb/branch-to-return.ll b/llvm/test/CodeGen/Thumb/branch-to-return.ll
index 11e8add6f215b..b190a11aeab17 100644
--- a/llvm/test/CodeGen/Thumb/branch-to-return.ll
+++ b/llvm/test/CodeGen/Thumb/branch-to-return.ll
@@ -26,7 +26,7 @@ define i32 @foo(ptr %x, i32 %n) {
 ; CHECK-NEXT:    ldr.w r0, [r12]
 ; CHECK-NEXT:  .LBB0_6: @ %for.body.preheader1
 ; CHECK-NEXT:    subs r3, r1, r3
-; CHECK-NEXT:    mvn r2, #12
+; CHECK-NEXT:    mvn r2, #15
 ; CHECK-NEXT:    and.w r1, r2, r1, lsl #2
 ; CHECK-NEXT:    add r1, r12
 ; CHECK-NEXT:  .LBB0_7: @ %for.body
diff --git a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
index bcd92f81911b2..b75f1ff742bee 100644
--- a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
+++ b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
@@ -283,7 +283,7 @@ define void @test_width2(ptr nocapture readnone %x, ptr nocapture %y, i8 zeroext
 ; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
 ; CHECK-NEXT:    adds r0, r2, #1
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    and r0, r0, #510
 ; CHECK-NEXT:    subs r0, #2
 ; CHECK-NEXT:    add.w r0, r3, r0, lsr #1
 ; CHECK-NEXT:    dls lr, r0
diff --git a/llvm/test/CodeGen/Thumb2/bf16-instructions.ll b/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
index 313d237d54b35..3d0802a7e9ff7 100644
--- a/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
+++ b/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
@@ -2104,12 +2104,11 @@ define bfloat @test_copysign(bfloat %a, bfloat %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign:
 ; CHECK-FP:       @ %bb.0:
-; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, s1
-; CHECK-FP-NEXT:    and r1, r1, #32768
-; CHECK-FP-NEXT:    bfc r0, #15, #17
-; CHECK-FP-NEXT:    add r0, r1
-; CHECK-FP-NEXT:    vmov.f16 s0, r0
+; CHECK-FP-NEXT:    vmov r0, s1
+; CHECK-FP-NEXT:    vmov r1, s0
+; CHECK-FP-NEXT:    lsrs r0, r0, #15
+; CHECK-FP-NEXT:    bfi r1, r0, #15, #1
+; CHECK-FP-NEXT:    vmov.f16 s0, r1
 ; CHECK-FP-NEXT:    vmov.f16 r0, s0
 ; CHECK-FP-NEXT:    vmov s0, r0
 ; CHECK-FP-NEXT:    bx lr
@@ -2128,11 +2127,11 @@ define bfloat @test_copysign_f32(bfloat %a, float %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign_f32:
 ; CHECK-FP:       @ %bb.0:
-; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, s1
-; CHECK-FP-NEXT:    and r1, r1, #-2147483648
-; CHECK-FP-NEXT:    bfc r0, #15, #17
-; CHECK-FP-NEXT:    orr.w r0, r0, r1, lsr #16
+; CHECK-FP-NEXT:    vmov r0, s1
+; CHECK-FP-NEXT:    vmov r1, s0
+; CHECK-FP-NEXT:    and r0, r0, #-2147483648
+; CHECK-FP-NEXT:    bic r1, r1, #32768
+; CHECK-FP-NEXT:    orr.w r0, r1, r0, lsr #16
 ; CHECK-FP-NEXT:    vmov.f16 s0, r0
 ; CHECK-FP-NEXT:    vmov.f16 r0, s0
 ; CHECK-FP-NEXT:    vmov s0, r0
@@ -2153,10 +2152,10 @@ define bfloat @test_copysign_f64(bfloat %a, double %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign_f64:
 ; CHECK-FP:       @ %bb.0:
+; CHECK-FP-NEXT:    vmov r0, r1, d1
 ; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, r2, d1
-; CHECK-FP-NEXT:    and r1, r2, #-2147483648
-; CHECK-FP-NEXT:    bfc r0, #15, #17
+; CHECK-FP-NEXT:    and r1, r1, #-2147483648
+; CHECK-FP-NEXT:    bic r0, r0, #32768
 ; CHECK-FP-NEXT:    orr.w r0, r0, r1, lsr #16
 ; CHECK-FP-NEXT:    vmov.f16 s0, r0
 ; CHECK-FP-NEXT:    vmov.f16 r0, s0
@@ -2180,12 +2179,11 @@ define float @test_copysign_extended(bfloat %a, bfloat %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign_extended:
 ; CHECK-FP:       @ %bb.0:
-; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, s1
-; CHECK-FP-NEXT:    and r1, r1, #32768
-; CHECK-FP-NEXT:    bfc r0, #15, #17
-; CHECK-FP-NEXT:    add r0, r1
-; CHECK-FP-NEXT:    lsls r0, r0, #16
+; CHECK-FP-NEXT:    vmov r0, s1
+; CHECK-FP-NEXT:    vmov r1, s0
+; CHECK-FP-NEXT:    lsrs r0, r0, #15
+; CHECK-FP-NEXT:    bfi r1, r0, #15, #1
+; CHECK-FP-NEXT:    lsls r0, r1, #16
 ; CHECK-FP-NEXT:    vmov s0, r0
 ; CHECK-FP-NEXT:    bx lr
   %r = call bfloat @llvm.copysign.f16(bfloat %a, bfloat %b)
diff --git a/llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll b/llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll
index 244a96595eaec..b17742861755d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll
@@ -204,7 +204,7 @@ define arm_aapcs_vfpcc void @startSmod4(i32 %S, ptr nocapture readonly %x, ptr n
 ; CHECK-NEXT:    poplt {r4, pc}
 ; CHECK-NEXT:  .LBB3_1: @ %vector.ph
 ; CHECK-NEXT:    vmov r12, s0
-; CHECK-NEXT:    mvn r4, #12
+; CHECK-NEXT:    mvn r4, #15
 ; CHECK-NEXT:    and.w r4, r4, r0, lsl #2
 ; CHECK-NEXT:    add r1, r4
 ; CHECK-NEXT:    add r2, r4
diff --git a/llvm/test/CodeGen/Thumb2/shift_parts.ll b/llvm/test/CodeGen/Thumb2/shift_parts.ll
index b4ac405d82ed5..060c628013050 100644
--- a/llvm/test/CodeGen/Thumb2/shift_parts.ll
+++ b/llvm/test/CodeGen/Thumb2/shift_parts.ll
@@ -457,7 +457,7 @@ entry:
 define i32 @ashr_demand_bottommask2(i64 %x) {
 ; CHECK-LABEL: ashr_demand_bottommask2:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    mvn r0, #2
+; CHECK-NEXT:    mvn r0, #3
 ; CHECK-NEXT:    and.w r0, r0, r1, lsl #1
 ; CHECK-NEXT:    bx lr
 entry:
@@ -470,7 +470,7 @@ entry:
 define i32 @lshr_demand_bottommask2(i64 %x) {
 ; CHECK-LABEL: lshr_demand_bottommask2:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    mvn r0, #2
+; CHECK-NEXT:    mvn r0, #3
 ; CHECK-NEXT:    and.w r0, r0, r1, lsl #1
 ; CHECK-NEXT:    bx lr
 entry:
diff --git a/llvm/test/CodeGen/Thumb2/thumb2-rev16.ll b/llvm/test/CodeGen/Thumb2/thumb2-rev16.ll
index 59d5e5489015e..e27a049ecc252 100644
--- a/llvm/test/CodeGen/Thumb2/thumb2-rev16.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-rev16.ll
@@ -71,9 +71,8 @@ define i32 @different_shift_amount(i32 %a) {
 ; CHECK-LABEL: different_shift_amount:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    mov.w r1, #16711935
-; CHECK-NEXT:    movw r2, #65024
+; CHECK-NEXT:    mvn r2, #16711680
 ; CHECK-NEXT:    and.w r1, r1, r0, lsr #8
-; CHECK-NEXT:    movt r2, #65280
 ; CHECK-NEXT:    and.w r0, r2, r0, lsl #9
 ; CHECK-NEXT:    add r0, r1
 ; CHECK-NEXT:    bx lr
diff --git a/llvm/test/CodeGen/Thumb2/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/Thumb2/urem-seteq-illegal-types.ll
index a0247c29f257f..1f1173bd43448 100644
--- a/llvm/test/CodeGen/Thumb2/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/Thumb2/urem-seteq-illegal-types.ll
@@ -26,7 +26,8 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; CHECK-NEXT:    movt r1, #1755
 ; CHECK-NEXT:    movt r2, #146
 ; CHECK-NEXT:    muls r0, r1, r0
-; CHECK-NEXT:    ubfx r1, r0, #1, #26
+; CHECK-NEXT:    bic r1, r0, #134217728
+; CHECK-NEXT:    lsrs r1, r1, #1
 ; CHECK-NEXT:    orr.w r0, r1, r0, lsl #26
 ; CHECK-NEXT:    bic r1, r0, #-134217728
 ; CHECK-NEXT:    movs r0, #0
From 9bfcb9053ee9cdc0ff50664192cbb13433cc4906 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sun, 26 Oct 2025 20:06:27 -0400
Subject: [PATCH 2/2] Update ARMISelLowering.cpp
---
 llvm/lib/Target/ARM/ARMISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 03e15d0db0c3e..debcf6c267c7f 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -20241,7 +20241,7 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
     return false;
 
   if (isLegalLogicalImmediate(~Mask, Subtarget))
-    return UseMask(Mask); // FIXME: Returning false causes infinite loop.
+    return false; // FIXME: Returning false causes infinite loop.
 
   if (IsLegalMask(0xFF)) {
     ++NumOptimizedImms;
    
    