[llvm] [ARM] Only change mask if demanded bits says we can optimize (PR #165106)
    via llvm-commits 
    llvm-commits at lists.llvm.org
       
    Sun Oct 26 16:51:14 PDT 2025
    
    
  
https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/165106
>From 50be66956ac8612bc2d4c80573641ed8733e4cf0 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sat, 25 Oct 2025 12:27:59 -0400
Subject: [PATCH 1/3] [ARM] Only change mask if demanded bits says we can
 optimize
Also add a switch (arm-enable-logical-imm) that can be used to turn off the logical immediate optimization.
---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |  94 ++++++++---
 llvm/test/CodeGen/ARM/funnel-shift-rot.ll     |   5 +-
 ...st-and-by-const-from-lshr-in-eqcmp-zero.ll |  56 ++++---
 llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll | 158 +++++++++++++-----
 llvm/test/CodeGen/ARM/va_arg.ll               |   2 +-
 llvm/test/CodeGen/Thumb/bic_imm.ll            |   6 +-
 llvm/test/CodeGen/Thumb/branch-to-return.ll   |   2 +-
 llvm/test/CodeGen/Thumb2/active_lane_mask.ll  |   2 +-
 8 files changed, 228 insertions(+), 97 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 313ae3d68fb83..41a0ca1271cf5 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -118,6 +118,7 @@ using namespace llvm;
 #define DEBUG_TYPE "arm-isel"
 
 STATISTIC(NumTailCalls, "Number of tail calls");
+STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
 STATISTIC(NumConstpoolPromoted,
@@ -142,6 +143,12 @@ static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
     cl::desc("Maximum size of ALL constants to promote into a constant pool"),
     cl::init(128));
 
+static cl::opt<bool>
+    EnableOptimizeLogicalImm("arm-enable-logical-imm", cl::Hidden,
+                             cl::desc("Enable ARM logical imm instruction "
+                                      "optimization"),
+                             cl::init(true));
+
 cl::opt<unsigned>
 MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
   cl::desc("Maximum interleave factor for MVE VLDn to generate."),
@@ -20138,6 +20145,16 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
   }
 }
 
+static bool isLegalLogicalImmediate(unsigned Imm,
+                                    const ARMSubtarget *Subtarget) {
+  if (!Subtarget->isThumb())
+    return ARM_AM::getSOImmVal(Imm) != -1;
+  if (Subtarget->isThumb2())
+    return ARM_AM::getT2SOImmVal(Imm) != -1;
+  // Thumb1 only has 8-bit unsigned immediate.
+  return Imm <= 255;
+}
+
 bool ARMTargetLowering::targetShrinkDemandedConstant(
     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
     TargetLoweringOpt &TLO) const {
@@ -20146,8 +20163,7 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
   if (!TLO.LegalOps)
     return false;
 
-  // Only optimize AND for now.
-  if (Op.getOpcode() != ISD::AND)
+  if (!EnableOptimizeLogicalImm)
     return false;
 
   EVT VT = Op.getValueType();
@@ -20158,6 +20174,14 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
 
   assert(VT == MVT::i32 && "Unexpected integer type");
 
+  // Exit early if we demand all bits.
+  if (DemandedBits.popcount() == 32)
+    return false;
+
+  // Only optimize AND for now.
+  if (Op.getOpcode() != ISD::AND)
+    return false;
+
   // Make sure the RHS really is a constant.
   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
   if (!C)
@@ -20165,21 +20189,13 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
 
   unsigned Mask = C->getZExtValue();
 
+  if (Mask == 0 || Mask == ~0U)
+    return false;
+
   unsigned Demanded = DemandedBits.getZExtValue();
   unsigned ShrunkMask = Mask & Demanded;
   unsigned ExpandedMask = Mask | ~Demanded;
 
-  // If the mask is all zeros, let the target-independent code replace the
-  // result with zero.
-  if (ShrunkMask == 0)
-    return false;
-
-  // If the mask is all ones, erase the AND. (Currently, the target-independent
-  // code won't do this, so we have to do it explicitly to avoid an infinite
-  // loop in obscure cases.)
-  if (ExpandedMask == ~0U)
-    return TLO.CombineTo(Op, Op.getOperand(0));
-
   auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
     return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
   };
@@ -20192,30 +20208,56 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
     return TLO.CombineTo(Op, NewOp);
   };
 
-  // Prefer uxtb mask.
-  if (IsLegalMask(0xFF))
-    return UseMask(0xFF);
+  // If the mask is all zeros, let the target-independent code replace the
+  // result with zero.
+  if (ShrunkMask == 0) {
+    ++NumOptimizedImms;
+    return UseMask(ShrunkMask);
+  }
+
+  // If the mask is all ones, erase the AND. (Currently, the target-independent
+  // code won't do this, so we have to do it explicitly to avoid an infinite
+  // loop in obscure cases.)
+  if (ExpandedMask == ~0U) {
+    ++NumOptimizedImms;
+    return UseMask(ExpandedMask);
+  }
+
+  // If thumb, check for uxth and uxtb masks first and foremost.
+  if (Subtarget->isThumb1Only() && Subtarget->hasV6Ops()) {
+    if (IsLegalMask(0xFF)) {
+      ++NumOptimizedImms;
+      return UseMask(0xFF);
+    }
 
-  // Prefer uxth mask.
-  if (IsLegalMask(0xFFFF))
-    return UseMask(0xFFFF);
+    if (IsLegalMask(0xFFFF)) {
+      ++NumOptimizedImms;
+      return UseMask(0xFFFF);
+    }
+  }
 
-  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
-  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
-  if (ShrunkMask < 256)
+  // Don't optimize if it is legal already.
+  if (isLegalLogicalImmediate(Mask, Subtarget))
+    return false;
+
+  if (isLegalLogicalImmediate(~Mask, Subtarget))
+    return UseMask(Mask); // FIXME: Returning false causes infinite loop.
+
+  if (isLegalLogicalImmediate(ShrunkMask, Subtarget)) {
+    ++NumOptimizedImms;
     return UseMask(ShrunkMask);
+  }
 
-  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
-  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
-  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
+  if (isLegalLogicalImmediate(~ExpandedMask, Subtarget)) {
+    ++NumOptimizedImms;
     return UseMask(ExpandedMask);
+  }
 
   // Potential improvements:
   //
   // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
   // We could try to prefer Thumb1 immediates which can be lowered to a
   // two-instruction sequence.
-  // We could try to recognize more legal ARM/Thumb2 immediates here.
 
   return false;
 }
diff --git a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
index a1b6847d623d0..6f34a5fd00314 100644
--- a/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/ARM/funnel-shift-rot.ll
@@ -19,7 +19,7 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
 define i8 @rotl_i8_const_shift(i8 %x) {
 ; CHECK-LABEL: rotl_i8_const_shift:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    uxtb r1, r0
+; CHECK-NEXT:    and r1, r0, #224
 ; CHECK-NEXT:    lsl r0, r0, #3
 ; CHECK-NEXT:    orr r0, r0, r1, lsr #5
 ; CHECK-NEXT:    bx lr
@@ -161,8 +161,7 @@ define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
 define i8 @rotr_i8_const_shift(i8 %x) {
 ; CHECK-LABEL: rotr_i8_const_shift:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    uxtb r1, r0
-; CHECK-NEXT:    lsr r1, r1, #3
+; CHECK-NEXT:    ubfx r1, r0, #3, #5
 ; CHECK-NEXT:    orr r0, r1, r0, lsl #5
 ; CHECK-NEXT:    bx lr
   %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
diff --git a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 7cc623fb0a616..a21ac8944d7ad 100644
--- a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -21,9 +21,9 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; ARM-LABEL: scalar_i8_signbit_eq:
 ; ARM:       @ %bb.0:
 ; ARM-NEXT:    uxtb r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
+; ARM-NEXT:    mov r2, #128
+; ARM-NEXT:    and r0, r2, r0, lsl r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxtb r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #7
 ; ARM-NEXT:    bx lr
 ;
@@ -42,7 +42,7 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; THUMB78-NEXT:    uxtb r1, r1
 ; THUMB78-NEXT:    lsls r0, r1
 ; THUMB78-NEXT:    movs r1, #1
-; THUMB78-NEXT:    uxtb r0, r0
+; THUMB78-NEXT:    and r0, r0, #128
 ; THUMB78-NEXT:    eor.w r0, r1, r0, lsr #7
 ; THUMB78-NEXT:    bx lr
   %t0 = lshr i8 128, %y
@@ -122,9 +122,9 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; ARM-LABEL: scalar_i16_signbit_eq:
 ; ARM:       @ %bb.0:
 ; ARM-NEXT:    uxth r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
+; ARM-NEXT:    mov r2, #32768
+; ARM-NEXT:    and r0, r2, r0, lsl r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxth r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #15
 ; ARM-NEXT:    bx lr
 ;
@@ -144,7 +144,7 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; THUMB78-NEXT:    uxth r1, r1
 ; THUMB78-NEXT:    lsls r0, r1
 ; THUMB78-NEXT:    movs r1, #1
-; THUMB78-NEXT:    uxth r0, r0
+; THUMB78-NEXT:    and r0, r0, #32768
 ; THUMB78-NEXT:    eor.w r0, r1, r0, lsr #15
 ; THUMB78-NEXT:    bx lr
   %t0 = lshr i16 32768, %y
@@ -862,21 +862,35 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
 ;------------------------------------------------------------------------------;
 
 define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; ARM-LABEL: scalar_i8_signbit_ne:
-; ARM:       @ %bb.0:
-; ARM-NEXT:    uxtb r1, r1
-; ARM-NEXT:    lsl r0, r0, r1
-; ARM-NEXT:    uxtb r0, r0
-; ARM-NEXT:    lsr r0, r0, #7
-; ARM-NEXT:    bx lr
+; ARM6-LABEL: scalar_i8_signbit_ne:
+; ARM6:       @ %bb.0:
+; ARM6-NEXT:    uxtb r1, r1
+; ARM6-NEXT:    mov r2, #128
+; ARM6-NEXT:    and r0, r2, r0, lsl r1
+; ARM6-NEXT:    lsr r0, r0, #7
+; ARM6-NEXT:    bx lr
 ;
-; THUMB-LABEL: scalar_i8_signbit_ne:
-; THUMB:       @ %bb.0:
-; THUMB-NEXT:    uxtb r1, r1
-; THUMB-NEXT:    lsls r0, r1
-; THUMB-NEXT:    uxtb r0, r0
-; THUMB-NEXT:    lsrs r0, r0, #7
-; THUMB-NEXT:    bx lr
+; ARM78-LABEL: scalar_i8_signbit_ne:
+; ARM78:       @ %bb.0:
+; ARM78-NEXT:    uxtb r1, r1
+; ARM78-NEXT:    lsl r0, r0, r1
+; ARM78-NEXT:    ubfx r0, r0, #7, #1
+; ARM78-NEXT:    bx lr
+;
+; THUMB6-LABEL: scalar_i8_signbit_ne:
+; THUMB6:       @ %bb.0:
+; THUMB6-NEXT:    uxtb r1, r1
+; THUMB6-NEXT:    lsls r0, r1
+; THUMB6-NEXT:    uxtb r0, r0
+; THUMB6-NEXT:    lsrs r0, r0, #7
+; THUMB6-NEXT:    bx lr
+;
+; THUMB78-LABEL: scalar_i8_signbit_ne:
+; THUMB78:       @ %bb.0:
+; THUMB78-NEXT:    uxtb r1, r1
+; THUMB78-NEXT:    lsls r0, r1
+; THUMB78-NEXT:    ubfx r0, r0, #7, #1
+; THUMB78-NEXT:    bx lr
   %t0 = lshr i8 128, %y
   %t1 = and i8 %t0, %x
   %res = icmp ne i8 %t1, 0 ;  we are perfectly happy with 'ne' predicate
@@ -1051,3 +1065,5 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
   %res = icmp eq i8 %t1, 1 ; should be comparing with 0
   ret i1 %res
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; THUMB: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll b/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
index 4b0419577cdf0..7f4315ffcaa6c 100644
--- a/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
+++ b/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
 ; RUN: llc -mtriple=thumbv8 %s -o -       | FileCheck %s --check-prefixes=CHECK,T2
 ; RUN: llc -mtriple=thumbv8m.main %s -o - | FileCheck %s --check-prefixes=CHECK,T2
 ; RUN: llc -mtriple=thumbv8m.base %s -o - | FileCheck %s --check-prefixes=CHECK,T1
@@ -13,11 +14,23 @@
 
 ; Test sdiv i16
 define dso_local signext i16 @f0(i16 signext %F) local_unnamed_addr #0 {
-; CHECK-LABEL: f0
-; CHECK:       movs    r1, #2
-; CHECK-NEXT:  sdiv    r0, r0, r1
-; CHECK-NEXT:  sxth    r0, r0
-; CHECK-NEXT:  bx      lr
+; CHECK-LABEL: f0:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r1, #2
+; CHECK-NEXT:    sdiv r0, r0, r1
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+;
+; V6M-LABEL: f0:
+; V6M:       @ %bb.0: @ %entry
+; V6M-NEXT:    movs r1, #255
+; V6M-NEXT:    lsls r1, r1, #8
+; V6M-NEXT:    ands r1, r0
+; V6M-NEXT:    lsrs r1, r1, #15
+; V6M-NEXT:    adds r0, r0, r1
+; V6M-NEXT:    sxth r0, r0
+; V6M-NEXT:    asrs r0, r0, #1
+; V6M-NEXT:    bx lr
 
 entry:
   %0 = sdiv i16 %F, 2
@@ -26,10 +39,19 @@ entry:
 
 ; Same as above, but now with i32
 define dso_local i32 @f1(i32 %F) local_unnamed_addr #0 {
-; CHECK-LABEL: f1
-; CHECK:       movs    r1, #4
-; CHECK-NEXT:  sdiv    r0, r0, r1
-; CHECK-NEXT:  bx      lr
+; CHECK-LABEL: f1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r1, #4
+; CHECK-NEXT:    sdiv r0, r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V6M-LABEL: f1:
+; V6M:       @ %bb.0: @ %entry
+; V6M-NEXT:    asrs r1, r0, #31
+; V6M-NEXT:    lsrs r1, r1, #30
+; V6M-NEXT:    adds r0, r0, r1
+; V6M-NEXT:    asrs r0, r0, #2
+; V6M-NEXT:    bx lr
 
 entry:
   %div = sdiv i32 %F, 4
@@ -38,10 +60,18 @@ entry:
 
 ; The immediate is not a power of 2, so we expect a sdiv.
 define dso_local i32 @f2(i32 %F) local_unnamed_addr #0 {
-; CHECK-LABEL: f2
-; CHECK:       movs    r1, #5
-; CHECK-NEXT:  sdiv    r0, r0, r1
-; CHECK-NEXT:  bx      lr
+; CHECK-LABEL: f2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r1, #5
+; CHECK-NEXT:    sdiv r0, r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V6M-LABEL: f2:
+; V6M:       @ %bb.0: @ %entry
+; V6M-NEXT:    push {r7, lr}
+; V6M-NEXT:    movs r1, #5
+; V6M-NEXT:    bl __divsi3
+; V6M-NEXT:    pop {r7, pc}
 
 entry:
   %div = sdiv i32 %F, 5
@@ -51,8 +81,28 @@ entry:
 ; Try a larger power of 2 immediate: immediates larger than
 ; 128 don't give any code size savings.
 define dso_local i32 @f3(i32 %F) local_unnamed_addr #0 {
-; CHECK-LABEL:  f3
-; CHECK-NOT:    sdiv
+; T2-LABEL: f3:
+; T2:       @ %bb.0: @ %entry
+; T2-NEXT:    asrs r1, r0, #31
+; T2-NEXT:    add.w r0, r0, r1, lsr #24
+; T2-NEXT:    asrs r0, r0, #8
+; T2-NEXT:    bx lr
+;
+; T1-LABEL: f3:
+; T1:       @ %bb.0: @ %entry
+; T1-NEXT:    asrs r1, r0, #31
+; T1-NEXT:    lsrs r1, r1, #24
+; T1-NEXT:    adds r0, r0, r1
+; T1-NEXT:    asrs r0, r0, #8
+; T1-NEXT:    bx lr
+;
+; V6M-LABEL: f3:
+; V6M:       @ %bb.0: @ %entry
+; V6M-NEXT:    asrs r1, r0, #31
+; V6M-NEXT:    lsrs r1, r1, #24
+; V6M-NEXT:    adds r0, r0, r1
+; V6M-NEXT:    asrs r0, r0, #8
+; V6M-NEXT:    bx lr
 entry:
   %div = sdiv i32 %F, 256
   ret i32 %div
@@ -65,20 +115,35 @@ attributes #0 = { minsize norecurse nounwind optsize readnone }
 ; the sdiv to sdiv, but to the faster instruction sequence.
 
 define dso_local signext i16 @f4(i16 signext %F) {
-; T2-LABEL:  f4
-; T2:        uxth    r1, r0
-; T2-NEXT:   add.w   r0, r0, r1, lsr #15
-; T2-NEXT:   sxth    r0, r0
-; T2-NEXT:   asrs    r0, r0, #1
-; T2-NEXT:   bx      lr
-
-; T1-LABEL: f4
-; T1: 	    uxth  r1, r0
-; T1-NEXT: 	lsrs  r1, r1, #15
-; T1-NEXT: 	adds  r0, r0, r1
-; T1-NEXT: 	sxth  r0, r0
-; T1-NEXT: 	asrs  r0, r0, #1
-; T1-NEXT: 	bx	lr
+; T2-LABEL: f4:
+; T2:       @ %bb.0: @ %entry
+; T2-NEXT:    and r1, r0, #32768
+; T2-NEXT:    add.w r0, r0, r1, lsr #15
+; T2-NEXT:    sxth r0, r0
+; T2-NEXT:    asrs r0, r0, #1
+; T2-NEXT:    bx lr
+;
+; T1-LABEL: f4:
+; T1:       @ %bb.0: @ %entry
+; T1-NEXT:    movw r1, #65280
+; T1-NEXT:    ands r1, r0
+; T1-NEXT:    lsrs r1, r1, #15
+; T1-NEXT:    adds r0, r0, r1
+; T1-NEXT:    sxth r0, r0
+; T1-NEXT:    asrs r0, r0, #1
+; T1-NEXT:    bx lr
+;
+; V6M-LABEL: f4:
+; V6M:       @ %bb.0: @ %entry
+; V6M-NEXT:    movs r1, #255
+; V6M-NEXT:    lsls r1, r1, #8
+; V6M-NEXT:    ands r1, r0
+; V6M-NEXT:    lsrs r1, r1, #15
+; V6M-NEXT:    adds r0, r0, r1
+; V6M-NEXT:    sxth r0, r0
+; V6M-NEXT:    asrs r0, r0, #1
+; V6M-NEXT:    bx lr
+
 
 entry:
   %0 = sdiv i16 %F, 2
@@ -86,18 +151,29 @@ entry:
 }
 
 define dso_local i32 @f5(i32 %F) {
-; T2-LABEL: f5
-; T2:       asrs  r1, r0, #31
-; T2-NEXT:  add.w   r0, r0, r1, lsr #30
-; T2-NEXT:  asrs    r0, r0, #2
-; T2-NEXT:  bx      lr
-
-; T1-LABEL: f5
-; T1: 	    asrs r1, r0, #31
-; T1-NEXT:	lsrs  r1, r1, #30
-; T1-NEXT:	adds  r0, r0, r1
-; T1-NEXT:	asrs  r0, r0, #2
-; T1-NEXT:	bx  lr
+; T2-LABEL: f5:
+; T2:       @ %bb.0: @ %entry
+; T2-NEXT:    asrs r1, r0, #31
+; T2-NEXT:    add.w r0, r0, r1, lsr #30
+; T2-NEXT:    asrs r0, r0, #2
+; T2-NEXT:    bx lr
+;
+; T1-LABEL: f5:
+; T1:       @ %bb.0: @ %entry
+; T1-NEXT:    asrs r1, r0, #31
+; T1-NEXT:    lsrs r1, r1, #30
+; T1-NEXT:    adds r0, r0, r1
+; T1-NEXT:    asrs r0, r0, #2
+; T1-NEXT:    bx lr
+;
+; V6M-LABEL: f5:
+; V6M:       @ %bb.0: @ %entry
+; V6M-NEXT:    asrs r1, r0, #31
+; V6M-NEXT:    lsrs r1, r1, #30
+; V6M-NEXT:    adds r0, r0, r1
+; V6M-NEXT:    asrs r0, r0, #2
+; V6M-NEXT:    bx lr
+
 
 entry:
   %div = sdiv i32 %F, 4
diff --git a/llvm/test/CodeGen/ARM/va_arg.ll b/llvm/test/CodeGen/ARM/va_arg.ll
index 41556b9fa2dec..7228b82b19c5a 100644
--- a/llvm/test/CodeGen/ARM/va_arg.ll
+++ b/llvm/test/CodeGen/ARM/va_arg.ll
@@ -35,7 +35,7 @@ define double @test2(i32 %a, ptr %b, ...) nounwind optsize {
 ; CHECK-NEXT:    add r0, sp, #4
 ; CHECK-NEXT:    stmib sp, {r2, r3}
 ; CHECK-NEXT:    add r0, r0, #11
-; CHECK-NEXT:    bic r0, r0, #3
+; CHECK-NEXT:    bic r0, r0, #7
 ; CHECK-NEXT:    str r2, [r1]
 ; CHECK-NEXT:    add r1, r0, #8
 ; CHECK-NEXT:    str r1, [sp]
diff --git a/llvm/test/CodeGen/Thumb/bic_imm.ll b/llvm/test/CodeGen/Thumb/bic_imm.ll
index 741b2cf8db2e3..d1730c21250a8 100644
--- a/llvm/test/CodeGen/Thumb/bic_imm.ll
+++ b/llvm/test/CodeGen/Thumb/bic_imm.ll
@@ -82,7 +82,7 @@ define void @truncated_neg256(i16 %a, ptr %p) {
 ;
 ; CHECK-T2-LABEL: truncated_neg256:
 ; CHECK-T2:       @ %bb.0:
-; CHECK-T2-NEXT:    bic r0, r0, #255
+; CHECK-T2-NEXT:    and r0, r0, #65280
 ; CHECK-T2-NEXT:    strh r0, [r1]
 ; CHECK-T2-NEXT:    bx lr
   %and = and i16 %a, -256
@@ -90,7 +90,6 @@ define void @truncated_neg256(i16 %a, ptr %p) {
   ret void
 }
 
-; FIXME: Thumb2 supports "bic r0, r0, #510"
 define void @truncated_neg511(i16 %a, ptr %p) {
 ; CHECK-T1-LABEL: truncated_neg511:
 ; CHECK-T1:       @ %bb.0:
@@ -105,8 +104,7 @@ define void @truncated_neg511(i16 %a, ptr %p) {
 ;
 ; CHECK-T2-LABEL: truncated_neg511:
 ; CHECK-T2:       @ %bb.0:
-; CHECK-T2-NEXT:    movw r2, #65025
-; CHECK-T2-NEXT:    ands r0, r2
+; CHECK-T2-NEXT:    bic r0, r0, #510
 ; CHECK-T2-NEXT:    strh r0, [r1]
 ; CHECK-T2-NEXT:    bx lr
   %and = and i16 %a, -511
diff --git a/llvm/test/CodeGen/Thumb/branch-to-return.ll b/llvm/test/CodeGen/Thumb/branch-to-return.ll
index 11e8add6f215b..b190a11aeab17 100644
--- a/llvm/test/CodeGen/Thumb/branch-to-return.ll
+++ b/llvm/test/CodeGen/Thumb/branch-to-return.ll
@@ -26,7 +26,7 @@ define i32 @foo(ptr %x, i32 %n) {
 ; CHECK-NEXT:    ldr.w r0, [r12]
 ; CHECK-NEXT:  .LBB0_6: @ %for.body.preheader1
 ; CHECK-NEXT:    subs r3, r1, r3
-; CHECK-NEXT:    mvn r2, #12
+; CHECK-NEXT:    mvn r2, #15
 ; CHECK-NEXT:    and.w r1, r2, r1, lsl #2
 ; CHECK-NEXT:    add r1, r12
 ; CHECK-NEXT:  .LBB0_7: @ %for.body
diff --git a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
index bcd92f81911b2..b75f1ff742bee 100644
--- a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
+++ b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll
@@ -283,7 +283,7 @@ define void @test_width2(ptr nocapture readnone %x, ptr nocapture %y, i8 zeroext
 ; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
 ; CHECK-NEXT:    adds r0, r2, #1
 ; CHECK-NEXT:    movs r3, #1
-; CHECK-NEXT:    bic r0, r0, #1
+; CHECK-NEXT:    and r0, r0, #510
 ; CHECK-NEXT:    subs r0, #2
 ; CHECK-NEXT:    add.w r0, r3, r0, lsr #1
 ; CHECK-NEXT:    dls lr, r0
>From 5a0106a032eea44e166e380af73a2ab8bddc8a67 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sun, 26 Oct 2025 19:50:28 -0400
Subject: [PATCH 2/3] s
---
 llvm/test/CodeGen/Thumb2/bf16-instructions.ll | 38 +++++++++----------
 llvm/test/CodeGen/Thumb2/shift_parts.ll       |  4 +-
 2 files changed, 20 insertions(+), 22 deletions(-)
diff --git a/llvm/test/CodeGen/Thumb2/bf16-instructions.ll b/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
index 313d237d54b35..3d0802a7e9ff7 100644
--- a/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
+++ b/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
@@ -2104,12 +2104,11 @@ define bfloat @test_copysign(bfloat %a, bfloat %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign:
 ; CHECK-FP:       @ %bb.0:
-; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, s1
-; CHECK-FP-NEXT:    and r1, r1, #32768
-; CHECK-FP-NEXT:    bfc r0, #15, #17
-; CHECK-FP-NEXT:    add r0, r1
-; CHECK-FP-NEXT:    vmov.f16 s0, r0
+; CHECK-FP-NEXT:    vmov r0, s1
+; CHECK-FP-NEXT:    vmov r1, s0
+; CHECK-FP-NEXT:    lsrs r0, r0, #15
+; CHECK-FP-NEXT:    bfi r1, r0, #15, #1
+; CHECK-FP-NEXT:    vmov.f16 s0, r1
 ; CHECK-FP-NEXT:    vmov.f16 r0, s0
 ; CHECK-FP-NEXT:    vmov s0, r0
 ; CHECK-FP-NEXT:    bx lr
@@ -2128,11 +2127,11 @@ define bfloat @test_copysign_f32(bfloat %a, float %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign_f32:
 ; CHECK-FP:       @ %bb.0:
-; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, s1
-; CHECK-FP-NEXT:    and r1, r1, #-2147483648
-; CHECK-FP-NEXT:    bfc r0, #15, #17
-; CHECK-FP-NEXT:    orr.w r0, r0, r1, lsr #16
+; CHECK-FP-NEXT:    vmov r0, s1
+; CHECK-FP-NEXT:    vmov r1, s0
+; CHECK-FP-NEXT:    and r0, r0, #-2147483648
+; CHECK-FP-NEXT:    bic r1, r1, #32768
+; CHECK-FP-NEXT:    orr.w r0, r1, r0, lsr #16
 ; CHECK-FP-NEXT:    vmov.f16 s0, r0
 ; CHECK-FP-NEXT:    vmov.f16 r0, s0
 ; CHECK-FP-NEXT:    vmov s0, r0
@@ -2153,10 +2152,10 @@ define bfloat @test_copysign_f64(bfloat %a, double %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign_f64:
 ; CHECK-FP:       @ %bb.0:
+; CHECK-FP-NEXT:    vmov r0, r1, d1
 ; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, r2, d1
-; CHECK-FP-NEXT:    and r1, r2, #-2147483648
-; CHECK-FP-NEXT:    bfc r0, #15, #17
+; CHECK-FP-NEXT:    and r1, r1, #-2147483648
+; CHECK-FP-NEXT:    bic r0, r0, #32768
 ; CHECK-FP-NEXT:    orr.w r0, r0, r1, lsr #16
 ; CHECK-FP-NEXT:    vmov.f16 s0, r0
 ; CHECK-FP-NEXT:    vmov.f16 r0, s0
@@ -2180,12 +2179,11 @@ define float @test_copysign_extended(bfloat %a, bfloat %b) {
 ;
 ; CHECK-FP-LABEL: test_copysign_extended:
 ; CHECK-FP:       @ %bb.0:
-; CHECK-FP-NEXT:    vmov r0, s0
-; CHECK-FP-NEXT:    vmov r1, s1
-; CHECK-FP-NEXT:    and r1, r1, #32768
-; CHECK-FP-NEXT:    bfc r0, #15, #17
-; CHECK-FP-NEXT:    add r0, r1
-; CHECK-FP-NEXT:    lsls r0, r0, #16
+; CHECK-FP-NEXT:    vmov r0, s1
+; CHECK-FP-NEXT:    vmov r1, s0
+; CHECK-FP-NEXT:    lsrs r0, r0, #15
+; CHECK-FP-NEXT:    bfi r1, r0, #15, #1
+; CHECK-FP-NEXT:    lsls r0, r1, #16
 ; CHECK-FP-NEXT:    vmov s0, r0
 ; CHECK-FP-NEXT:    bx lr
   %r = call bfloat @llvm.copysign.f16(bfloat %a, bfloat %b)
diff --git a/llvm/test/CodeGen/Thumb2/shift_parts.ll b/llvm/test/CodeGen/Thumb2/shift_parts.ll
index b4ac405d82ed5..060c628013050 100644
--- a/llvm/test/CodeGen/Thumb2/shift_parts.ll
+++ b/llvm/test/CodeGen/Thumb2/shift_parts.ll
@@ -457,7 +457,7 @@ entry:
 define i32 @ashr_demand_bottommask2(i64 %x) {
 ; CHECK-LABEL: ashr_demand_bottommask2:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    mvn r0, #2
+; CHECK-NEXT:    mvn r0, #3
 ; CHECK-NEXT:    and.w r0, r0, r1, lsl #1
 ; CHECK-NEXT:    bx lr
 entry:
@@ -470,7 +470,7 @@ entry:
 define i32 @lshr_demand_bottommask2(i64 %x) {
 ; CHECK-LABEL: lshr_demand_bottommask2:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    mvn r0, #2
+; CHECK-NEXT:    mvn r0, #3
 ; CHECK-NEXT:    and.w r0, r0, r1, lsl #1
 ; CHECK-NEXT:    bx lr
 entry:
>From 147c9b8b29121a6e3930d1fd5aaf2a2f2abb0315 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Sun, 26 Oct 2025 19:51:03 -0400
Subject: [PATCH 3/3] Update sdiv-pow2-thumb-size.ll
---
 llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll | 158 +++++-------------
 1 file changed, 41 insertions(+), 117 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll b/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
index 7f4315ffcaa6c..4b0419577cdf0 100644
--- a/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
+++ b/llvm/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
 ; RUN: llc -mtriple=thumbv8 %s -o -       | FileCheck %s --check-prefixes=CHECK,T2
 ; RUN: llc -mtriple=thumbv8m.main %s -o - | FileCheck %s --check-prefixes=CHECK,T2
 ; RUN: llc -mtriple=thumbv8m.base %s -o - | FileCheck %s --check-prefixes=CHECK,T1
@@ -14,23 +13,11 @@
 
 ; Test sdiv i16
 define dso_local signext i16 @f0(i16 signext %F) local_unnamed_addr #0 {
-; CHECK-LABEL: f0:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    movs r1, #2
-; CHECK-NEXT:    sdiv r0, r0, r1
-; CHECK-NEXT:    sxth r0, r0
-; CHECK-NEXT:    bx lr
-;
-; V6M-LABEL: f0:
-; V6M:       @ %bb.0: @ %entry
-; V6M-NEXT:    movs r1, #255
-; V6M-NEXT:    lsls r1, r1, #8
-; V6M-NEXT:    ands r1, r0
-; V6M-NEXT:    lsrs r1, r1, #15
-; V6M-NEXT:    adds r0, r0, r1
-; V6M-NEXT:    sxth r0, r0
-; V6M-NEXT:    asrs r0, r0, #1
-; V6M-NEXT:    bx lr
+; CHECK-LABEL: f0
+; CHECK:       movs    r1, #2
+; CHECK-NEXT:  sdiv    r0, r0, r1
+; CHECK-NEXT:  sxth    r0, r0
+; CHECK-NEXT:  bx      lr
 
 entry:
   %0 = sdiv i16 %F, 2
@@ -39,19 +26,10 @@ entry:
 
 ; Same as above, but now with i32
 define dso_local i32 @f1(i32 %F) local_unnamed_addr #0 {
-; CHECK-LABEL: f1:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    movs r1, #4
-; CHECK-NEXT:    sdiv r0, r0, r1
-; CHECK-NEXT:    bx lr
-;
-; V6M-LABEL: f1:
-; V6M:       @ %bb.0: @ %entry
-; V6M-NEXT:    asrs r1, r0, #31
-; V6M-NEXT:    lsrs r1, r1, #30
-; V6M-NEXT:    adds r0, r0, r1
-; V6M-NEXT:    asrs r0, r0, #2
-; V6M-NEXT:    bx lr
+; CHECK-LABEL: f1
+; CHECK:       movs    r1, #4
+; CHECK-NEXT:  sdiv    r0, r0, r1
+; CHECK-NEXT:  bx      lr
 
 entry:
   %div = sdiv i32 %F, 4
@@ -60,18 +38,10 @@ entry:
 
 ; The immediate is not a power of 2, so we expect a sdiv.
 define dso_local i32 @f2(i32 %F) local_unnamed_addr #0 {
-; CHECK-LABEL: f2:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    movs r1, #5
-; CHECK-NEXT:    sdiv r0, r0, r1
-; CHECK-NEXT:    bx lr
-;
-; V6M-LABEL: f2:
-; V6M:       @ %bb.0: @ %entry
-; V6M-NEXT:    push {r7, lr}
-; V6M-NEXT:    movs r1, #5
-; V6M-NEXT:    bl __divsi3
-; V6M-NEXT:    pop {r7, pc}
+; CHECK-LABEL: f2
+; CHECK:       movs    r1, #5
+; CHECK-NEXT:  sdiv    r0, r0, r1
+; CHECK-NEXT:  bx      lr
 
 entry:
   %div = sdiv i32 %F, 5
@@ -81,28 +51,8 @@ entry:
 ; Try a larger power of 2 immediate: immediates larger than
 ; 128 don't give any code size savings.
 define dso_local i32 @f3(i32 %F) local_unnamed_addr #0 {
-; T2-LABEL: f3:
-; T2:       @ %bb.0: @ %entry
-; T2-NEXT:    asrs r1, r0, #31
-; T2-NEXT:    add.w r0, r0, r1, lsr #24
-; T2-NEXT:    asrs r0, r0, #8
-; T2-NEXT:    bx lr
-;
-; T1-LABEL: f3:
-; T1:       @ %bb.0: @ %entry
-; T1-NEXT:    asrs r1, r0, #31
-; T1-NEXT:    lsrs r1, r1, #24
-; T1-NEXT:    adds r0, r0, r1
-; T1-NEXT:    asrs r0, r0, #8
-; T1-NEXT:    bx lr
-;
-; V6M-LABEL: f3:
-; V6M:       @ %bb.0: @ %entry
-; V6M-NEXT:    asrs r1, r0, #31
-; V6M-NEXT:    lsrs r1, r1, #24
-; V6M-NEXT:    adds r0, r0, r1
-; V6M-NEXT:    asrs r0, r0, #8
-; V6M-NEXT:    bx lr
+; CHECK-LABEL:  f3
+; CHECK-NOT:    sdiv
 entry:
   %div = sdiv i32 %F, 256
   ret i32 %div
@@ -115,35 +65,20 @@ attributes #0 = { minsize norecurse nounwind optsize readnone }
 ; the sdiv to sdiv, but to the faster instruction sequence.
 
 define dso_local signext i16 @f4(i16 signext %F) {
-; T2-LABEL: f4:
-; T2:       @ %bb.0: @ %entry
-; T2-NEXT:    and r1, r0, #32768
-; T2-NEXT:    add.w r0, r0, r1, lsr #15
-; T2-NEXT:    sxth r0, r0
-; T2-NEXT:    asrs r0, r0, #1
-; T2-NEXT:    bx lr
-;
-; T1-LABEL: f4:
-; T1:       @ %bb.0: @ %entry
-; T1-NEXT:    movw r1, #65280
-; T1-NEXT:    ands r1, r0
-; T1-NEXT:    lsrs r1, r1, #15
-; T1-NEXT:    adds r0, r0, r1
-; T1-NEXT:    sxth r0, r0
-; T1-NEXT:    asrs r0, r0, #1
-; T1-NEXT:    bx lr
-;
-; V6M-LABEL: f4:
-; V6M:       @ %bb.0: @ %entry
-; V6M-NEXT:    movs r1, #255
-; V6M-NEXT:    lsls r1, r1, #8
-; V6M-NEXT:    ands r1, r0
-; V6M-NEXT:    lsrs r1, r1, #15
-; V6M-NEXT:    adds r0, r0, r1
-; V6M-NEXT:    sxth r0, r0
-; V6M-NEXT:    asrs r0, r0, #1
-; V6M-NEXT:    bx lr
-
+; T2-LABEL:  f4
+; T2:        uxth    r1, r0
+; T2-NEXT:   add.w   r0, r0, r1, lsr #15
+; T2-NEXT:   sxth    r0, r0
+; T2-NEXT:   asrs    r0, r0, #1
+; T2-NEXT:   bx      lr
+
+; T1-LABEL: f4
+; T1: 	    uxth  r1, r0
+; T1-NEXT: 	lsrs  r1, r1, #15
+; T1-NEXT: 	adds  r0, r0, r1
+; T1-NEXT: 	sxth  r0, r0
+; T1-NEXT: 	asrs  r0, r0, #1
+; T1-NEXT: 	bx	lr
 
 entry:
   %0 = sdiv i16 %F, 2
@@ -151,29 +86,18 @@ entry:
 }
 
 define dso_local i32 @f5(i32 %F) {
-; T2-LABEL: f5:
-; T2:       @ %bb.0: @ %entry
-; T2-NEXT:    asrs r1, r0, #31
-; T2-NEXT:    add.w r0, r0, r1, lsr #30
-; T2-NEXT:    asrs r0, r0, #2
-; T2-NEXT:    bx lr
-;
-; T1-LABEL: f5:
-; T1:       @ %bb.0: @ %entry
-; T1-NEXT:    asrs r1, r0, #31
-; T1-NEXT:    lsrs r1, r1, #30
-; T1-NEXT:    adds r0, r0, r1
-; T1-NEXT:    asrs r0, r0, #2
-; T1-NEXT:    bx lr
-;
-; V6M-LABEL: f5:
-; V6M:       @ %bb.0: @ %entry
-; V6M-NEXT:    asrs r1, r0, #31
-; V6M-NEXT:    lsrs r1, r1, #30
-; V6M-NEXT:    adds r0, r0, r1
-; V6M-NEXT:    asrs r0, r0, #2
-; V6M-NEXT:    bx lr
-
+; T2-LABEL: f5
+; T2:       asrs  r1, r0, #31
+; T2-NEXT:  add.w   r0, r0, r1, lsr #30
+; T2-NEXT:  asrs    r0, r0, #2
+; T2-NEXT:  bx      lr
+
+; T1-LABEL: f5
+; T1: 	    asrs r1, r0, #31
+; T1-NEXT:	lsrs  r1, r1, #30
+; T1-NEXT:	adds  r0, r0, r1
+; T1-NEXT:	asrs  r0, r0, #2
+; T1-NEXT:	bx  lr
 
 entry:
   %div = sdiv i32 %F, 4
    
    
More information about the llvm-commits
mailing list