[llvm] ab4fc87 - [DAG] Emit table lookup from TargetLowering::expandCTTZ()

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 8 04:08:10 PDT 2022


Author: Shubham Narlawar
Date: 2022-08-08T12:08:05+01:00
New Revision: ab4fc87a9d96d759df3c287fd63892165e60e526

URL: https://github.com/llvm/llvm-project/commit/ab4fc87a9d96d759df3c287fd63892165e60e526
DIFF: https://github.com/llvm/llvm-project/commit/ab4fc87a9d96d759df3c287fd63892165e60e526.diff

LOG: [DAG] Emit table lookup from TargetLowering::expandCTTZ()

This patch emits a table lookup in expandCTTZ().

Context -
https://reviews.llvm.org/D113291 transforms a set of IR instructions into
the cttz intrinsic, but there are some targets which do not support CTTZ
or CTLZ. Hence, I generate a table lookup in TargetLowering::expandCTTZ().
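
For reference, a minimal standalone sketch of the de Bruijn multiply-and-lookup
scheme this emits (an illustrative C++ equivalent of the DAG nodes, not code
from the patch; the table matches the 32-bit constant pool data in the updated
tests, and the names cttz32/CTTZTable are hypothetical):

  #include <cstdint>

  // 32-entry table indexed by the top 5 bits of (X & -X) * 0x077CB531.
  static const uint8_t CTTZTable[32] = {
      0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
      31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9};

  unsigned cttz32(uint32_t X) {
    if (X == 0)  // Matches the select emitted for the non-zero-undef case.
      return 32;
    // X & -X isolates the lowest set bit; multiplying by the de Bruijn
    // constant places a unique 5-bit index in the top bits. The 64-bit
    // variant uses 0x0218A392CD3D5DBF and a shift of 58 instead.
    return CTTZTable[((X & -X) * 0x077CB531u) >> 27];
  }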

Differential Revision: https://reviews.llvm.org/D128911

Added: 
    llvm/test/CodeGen/SPARC/cttz.ll

Modified: 
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/ARM/cttz.ll
    llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
    llvm/test/CodeGen/RISCV/rv32zbb.ll
    llvm/test/CodeGen/RISCV/rv64zbb.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 432ac5caf4995..a061944173174 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4757,6 +4757,12 @@ class TargetLowering : public TargetLoweringBase {
   /// \returns The expansion result or SDValue() if it fails.
   SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;
 
+  /// Expand CTTZ via Table Lookup.
+  /// \param N Node to expand
+  /// \returns The expansion result or SDValue() if it fails.
+  SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
+                          SDValue Op, unsigned NumBitsPerElt) const;
+
   /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes,
   /// vector nodes can only succeed if all operations are legal/custom.
   /// \param N Node to expand

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index cbe4ca2ae1200..f5b0c485982f6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -7983,6 +7983,50 @@ SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const {
   return DAG.getNode(ISD::CTPOP, dl, VT, Op);
 }
 
+SDValue TargetLowering::CTTZTableLookup(SDNode *Node, SelectionDAG &DAG,
+                                        const SDLoc &DL, EVT VT, SDValue Op,
+                                        unsigned BitWidth) const {
+  if (BitWidth != 32 && BitWidth != 64)
+    return SDValue();
+  APInt DeBruijn = BitWidth == 32 ? APInt(32, 0x077CB531U)
+                                  : APInt(64, 0x0218A392CD3D5DBFULL);
+  const DataLayout &TD = DAG.getDataLayout();
+  MachinePointerInfo PtrInfo =
+      MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
+  unsigned ShiftAmt = BitWidth - Log2_32(BitWidth);
+  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
+  SDValue Lookup = DAG.getNode(
+      ISD::SRL, DL, VT,
+      DAG.getNode(ISD::MUL, DL, VT, DAG.getNode(ISD::AND, DL, VT, Op, Neg),
+                  DAG.getConstant(DeBruijn, DL, VT)),
+      DAG.getConstant(ShiftAmt, DL, VT));
+  Lookup = DAG.getSExtOrTrunc(Lookup, DL, getPointerTy(TD));
+
+  SmallVector<uint8_t> Table(BitWidth, 0);
+  for (unsigned i = 0; i < BitWidth; i++) {
+    APInt Shl = DeBruijn.shl(i);
+    APInt Lshr = Shl.lshr(ShiftAmt);
+    Table[Lshr.getZExtValue()] = i;
+  }
+
+  // Create a ConstantArray in Constant Pool
+  auto *CA = ConstantDataArray::get(*DAG.getContext(), Table);
+  SDValue CPIdx = DAG.getConstantPool(CA, getPointerTy(TD),
+                                      TD.getPrefTypeAlign(CA->getType()));
+  SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getEntryNode(),
+                                   DAG.getMemBasePlusOffset(CPIdx, Lookup, DL),
+                                   PtrInfo, MVT::i8);
+  if (Node->getOpcode() != ISD::CTTZ_ZERO_UNDEF) {
+    EVT SetCCVT =
+        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+    SDValue Zero = DAG.getConstant(0, DL, VT);
+    SDValue SrcIsZero = DAG.getSetCC(DL, SetCCVT, Op, Zero, ISD::SETEQ);
+    ExtLoad = DAG.getSelect(DL, VT, SrcIsZero,
+                            DAG.getConstant(BitWidth, DL, VT), ExtLoad);
+  }
+  return ExtLoad;
+}
+
 SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const {
   SDLoc dl(Node);
   EVT VT = Node->getValueType(0);
@@ -8016,6 +8060,12 @@ SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const {
                         !isOperationLegalOrCustomOrPromote(ISD::XOR, VT)))
     return SDValue();
 
+  // Emit Table Lookup if ISD::CTLZ and ISD::CTPOP are not legal.
+  if (!VT.isVector() && isOperationExpand(ISD::CTPOP, VT) &&
+      !isOperationLegal(ISD::CTLZ, VT))
+    if (SDValue V = CTTZTableLookup(Node, DAG, dl, VT, Op, NumBitsPerElt))
+      return V;
+
   // for now, we use: { return popcount(~x & (x - 1)); }
   // unless the target has ctlz but not ctpop, in which case we use:
   // { return 32 - nlz(~x & (x-1)); }

diff --git a/llvm/test/CodeGen/ARM/cttz.ll b/llvm/test/CodeGen/ARM/cttz.ll
index c314bc4b8bd36..872ee1eb6a696 100644
--- a/llvm/test/CodeGen/ARM/cttz.ll
+++ b/llvm/test/CodeGen/ARM/cttz.ll
@@ -23,40 +23,31 @@ define i8 @test_i8(i8 %a) {
 ; CHECK-THUMB-LABEL: test_i8:
 ; CHECK-THUMB:       @ %bb.0:
 ; CHECK-THUMB-NEXT:    lsls r1, r0, #24
-; CHECK-THUMB-NEXT:    beq .LBB0_2
+; CHECK-THUMB-NEXT:    beq .LBB0_3
 ; CHECK-THUMB-NEXT:  @ %bb.1: @ %cond.false
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI0_0
-; CHECK-THUMB-NEXT:    ands r2, r0
-; CHECK-THUMB-NEXT:    subs r0, r1, r2
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI0_1
-; CHECK-THUMB-NEXT:    lsrs r2, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r1
-; CHECK-THUMB-NEXT:    ands r2, r1
-; CHECK-THUMB-NEXT:    adds r0, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI0_2
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB0_4
+; CHECK-THUMB-NEXT:  @ %bb.2: @ %cond.false
+; CHECK-THUMB-NEXT:    rsbs r1, r0, #0
 ; CHECK-THUMB-NEXT:    ands r1, r0
-; CHECK-THUMB-NEXT:    ldr r0, .LCPI0_3
+; CHECK-THUMB-NEXT:    ldr r0, .LCPI0_0
 ; CHECK-THUMB-NEXT:    muls r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r0, #24
+; CHECK-THUMB-NEXT:    lsrs r0, r0, #27
+; CHECK-THUMB-NEXT:    adr r1, .LCPI0_1
+; CHECK-THUMB-NEXT:    ldrb r0, [r1, r0]
 ; CHECK-THUMB-NEXT:    bx lr
-; CHECK-THUMB-NEXT:  .LBB0_2:
+; CHECK-THUMB-NEXT:  .LBB0_3:
 ; CHECK-THUMB-NEXT:    movs r0, #8
 ; CHECK-THUMB-NEXT:    bx lr
+; CHECK-THUMB-NEXT:  .LBB0_4:
+; CHECK-THUMB-NEXT:    movs r0, #32
+; CHECK-THUMB-NEXT:    bx lr
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.3:
+; CHECK-THUMB-NEXT:  @ %bb.5:
 ; CHECK-THUMB-NEXT:  .LCPI0_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI0_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI0_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI0_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
   ret i8 %tmp
 }
@@ -72,40 +63,31 @@ define i16 @test_i16(i16 %a) {
 ; CHECK-THUMB-LABEL: test_i16:
 ; CHECK-THUMB:       @ %bb.0:
 ; CHECK-THUMB-NEXT:    lsls r1, r0, #16
-; CHECK-THUMB-NEXT:    beq .LBB1_2
+; CHECK-THUMB-NEXT:    beq .LBB1_3
 ; CHECK-THUMB-NEXT:  @ %bb.1: @ %cond.false
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI1_0
-; CHECK-THUMB-NEXT:    ands r2, r0
-; CHECK-THUMB-NEXT:    subs r0, r1, r2
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI1_1
-; CHECK-THUMB-NEXT:    lsrs r2, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r1
-; CHECK-THUMB-NEXT:    ands r2, r1
-; CHECK-THUMB-NEXT:    adds r0, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI1_2
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB1_4
+; CHECK-THUMB-NEXT:  @ %bb.2: @ %cond.false
+; CHECK-THUMB-NEXT:    rsbs r1, r0, #0
 ; CHECK-THUMB-NEXT:    ands r1, r0
-; CHECK-THUMB-NEXT:    ldr r0, .LCPI1_3
+; CHECK-THUMB-NEXT:    ldr r0, .LCPI1_0
 ; CHECK-THUMB-NEXT:    muls r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r0, #24
+; CHECK-THUMB-NEXT:    lsrs r0, r0, #27
+; CHECK-THUMB-NEXT:    adr r1, .LCPI1_1
+; CHECK-THUMB-NEXT:    ldrb r0, [r1, r0]
 ; CHECK-THUMB-NEXT:    bx lr
-; CHECK-THUMB-NEXT:  .LBB1_2:
+; CHECK-THUMB-NEXT:  .LBB1_3:
 ; CHECK-THUMB-NEXT:    movs r0, #16
 ; CHECK-THUMB-NEXT:    bx lr
+; CHECK-THUMB-NEXT:  .LBB1_4:
+; CHECK-THUMB-NEXT:    movs r0, #32
+; CHECK-THUMB-NEXT:    bx lr
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.3:
+; CHECK-THUMB-NEXT:  @ %bb.5:
 ; CHECK-THUMB-NEXT:  .LCPI1_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI1_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI1_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI1_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
   ret i16 %tmp
 }
@@ -120,40 +102,28 @@ define i32 @test_i32(i32 %a) {
 ; CHECK-THUMB-LABEL: test_i32:
 ; CHECK-THUMB:       @ %bb.0:
 ; CHECK-THUMB-NEXT:    cmp r0, #0
-; CHECK-THUMB-NEXT:    beq .LBB2_2
+; CHECK-THUMB-NEXT:    beq .LBB2_3
 ; CHECK-THUMB-NEXT:  @ %bb.1: @ %cond.false
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI2_0
-; CHECK-THUMB-NEXT:    ands r2, r0
-; CHECK-THUMB-NEXT:    subs r0, r1, r2
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI2_1
-; CHECK-THUMB-NEXT:    lsrs r2, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r1
-; CHECK-THUMB-NEXT:    ands r2, r1
-; CHECK-THUMB-NEXT:    adds r0, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI2_2
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB2_3
+; CHECK-THUMB-NEXT:  @ %bb.2: @ %cond.false
+; CHECK-THUMB-NEXT:    rsbs r1, r0, #0
 ; CHECK-THUMB-NEXT:    ands r1, r0
-; CHECK-THUMB-NEXT:    ldr r0, .LCPI2_3
+; CHECK-THUMB-NEXT:    ldr r0, .LCPI2_0
 ; CHECK-THUMB-NEXT:    muls r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r0, #24
+; CHECK-THUMB-NEXT:    lsrs r0, r0, #27
+; CHECK-THUMB-NEXT:    adr r1, .LCPI2_1
+; CHECK-THUMB-NEXT:    ldrb r0, [r1, r0]
 ; CHECK-THUMB-NEXT:    bx lr
-; CHECK-THUMB-NEXT:  .LBB2_2:
+; CHECK-THUMB-NEXT:  .LBB2_3:
 ; CHECK-THUMB-NEXT:    movs r0, #32
 ; CHECK-THUMB-NEXT:    bx lr
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.3:
+; CHECK-THUMB-NEXT:  @ %bb.4:
 ; CHECK-THUMB-NEXT:  .LCPI2_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI2_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI2_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI2_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
   ret i32 %tmp
 }
@@ -176,56 +146,47 @@ define i64 @test_i64(i64 %a) {
 ; CHECK-THUMB-NEXT:    .save {r4, r5, r7, lr}
 ; CHECK-THUMB-NEXT:    push {r4, r5, r7, lr}
 ; CHECK-THUMB-NEXT:    ldr r5, .LCPI3_0
-; CHECK-THUMB-NEXT:    ldr r4, .LCPI3_1
-; CHECK-THUMB-NEXT:    ldr r3, .LCPI3_2
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI3_3
+; CHECK-THUMB-NEXT:    adr r4, .LCPI3_1
+; CHECK-THUMB-NEXT:    movs r3, #32
 ; CHECK-THUMB-NEXT:    cmp r0, #0
-; CHECK-THUMB-NEXT:    bne .LBB3_2
+; CHECK-THUMB-NEXT:    mov r2, r3
+; CHECK-THUMB-NEXT:    bne .LBB3_5
 ; CHECK-THUMB-NEXT:  @ %bb.1:
-; CHECK-THUMB-NEXT:    subs r0, r1, #1
-; CHECK-THUMB-NEXT:    bics r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #1
-; CHECK-THUMB-NEXT:    ands r1, r5
-; CHECK-THUMB-NEXT:    subs r0, r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r4
-; CHECK-THUMB-NEXT:    ands r1, r4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ands r0, r3
-; CHECK-THUMB-NEXT:    muls r2, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r0, r2, #24
-; CHECK-THUMB-NEXT:    adds r0, #32
-; CHECK-THUMB-NEXT:    movs r1, #0
-; CHECK-THUMB-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-THUMB-NEXT:    cmp r1, #0
+; CHECK-THUMB-NEXT:    bne .LBB3_6
 ; CHECK-THUMB-NEXT:  .LBB3_2:
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ands r0, r5
-; CHECK-THUMB-NEXT:    subs r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r4
-; CHECK-THUMB-NEXT:    ands r1, r4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ands r0, r3
-; CHECK-THUMB-NEXT:    muls r2, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r0, r2, #24
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    bne .LBB3_4
+; CHECK-THUMB-NEXT:  .LBB3_3:
+; CHECK-THUMB-NEXT:    adds r3, #32
+; CHECK-THUMB-NEXT:    mov r2, r3
+; CHECK-THUMB-NEXT:  .LBB3_4:
 ; CHECK-THUMB-NEXT:    movs r1, #0
+; CHECK-THUMB-NEXT:    mov r0, r2
 ; CHECK-THUMB-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-THUMB-NEXT:  .LBB3_5:
+; CHECK-THUMB-NEXT:    rsbs r2, r0, #0
+; CHECK-THUMB-NEXT:    ands r2, r0
+; CHECK-THUMB-NEXT:    muls r2, r5, r2
+; CHECK-THUMB-NEXT:    lsrs r2, r2, #27
+; CHECK-THUMB-NEXT:    ldrb r2, [r4, r2]
+; CHECK-THUMB-NEXT:    cmp r1, #0
+; CHECK-THUMB-NEXT:    beq .LBB3_2
+; CHECK-THUMB-NEXT:  .LBB3_6:
+; CHECK-THUMB-NEXT:    rsbs r3, r1, #0
+; CHECK-THUMB-NEXT:    ands r3, r1
+; CHECK-THUMB-NEXT:    muls r5, r3, r5
+; CHECK-THUMB-NEXT:    lsrs r1, r5, #27
+; CHECK-THUMB-NEXT:    ldrb r3, [r4, r1]
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB3_3
+; CHECK-THUMB-NEXT:    b .LBB3_4
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.3:
+; CHECK-THUMB-NEXT:  @ %bb.7:
 ; CHECK-THUMB-NEXT:  .LCPI3_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI3_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI3_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI3_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
   ret i64 %tmp
 }
@@ -241,35 +202,26 @@ define i8 @test_i8_zero_undef(i8 %a) {
 ;
 ; CHECK-THUMB-LABEL: test_i8_zero_undef:
 ; CHECK-THUMB:       @ %bb.0:
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI4_0
-; CHECK-THUMB-NEXT:    ands r2, r0
-; CHECK-THUMB-NEXT:    subs r0, r1, r2
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI4_1
-; CHECK-THUMB-NEXT:    lsrs r2, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r1
-; CHECK-THUMB-NEXT:    ands r2, r1
-; CHECK-THUMB-NEXT:    adds r0, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI4_2
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB4_2
+; CHECK-THUMB-NEXT:  @ %bb.1:
+; CHECK-THUMB-NEXT:    rsbs r1, r0, #0
 ; CHECK-THUMB-NEXT:    ands r1, r0
-; CHECK-THUMB-NEXT:    ldr r0, .LCPI4_3
+; CHECK-THUMB-NEXT:    ldr r0, .LCPI4_0
 ; CHECK-THUMB-NEXT:    muls r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r0, #24
+; CHECK-THUMB-NEXT:    lsrs r0, r0, #27
+; CHECK-THUMB-NEXT:    adr r1, .LCPI4_1
+; CHECK-THUMB-NEXT:    ldrb r0, [r1, r0]
+; CHECK-THUMB-NEXT:    bx lr
+; CHECK-THUMB-NEXT:  .LBB4_2:
+; CHECK-THUMB-NEXT:    movs r0, #32
 ; CHECK-THUMB-NEXT:    bx lr
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.1:
+; CHECK-THUMB-NEXT:  @ %bb.3:
 ; CHECK-THUMB-NEXT:  .LCPI4_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI4_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI4_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI4_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
   ret i8 %tmp
 }
@@ -283,35 +235,26 @@ define i16 @test_i16_zero_undef(i16 %a) {
 ;
 ; CHECK-THUMB-LABEL: test_i16_zero_undef:
 ; CHECK-THUMB:       @ %bb.0:
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI5_0
-; CHECK-THUMB-NEXT:    ands r2, r0
-; CHECK-THUMB-NEXT:    subs r0, r1, r2
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI5_1
-; CHECK-THUMB-NEXT:    lsrs r2, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r1
-; CHECK-THUMB-NEXT:    ands r2, r1
-; CHECK-THUMB-NEXT:    adds r0, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI5_2
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB5_2
+; CHECK-THUMB-NEXT:  @ %bb.1:
+; CHECK-THUMB-NEXT:    rsbs r1, r0, #0
 ; CHECK-THUMB-NEXT:    ands r1, r0
-; CHECK-THUMB-NEXT:    ldr r0, .LCPI5_3
+; CHECK-THUMB-NEXT:    ldr r0, .LCPI5_0
 ; CHECK-THUMB-NEXT:    muls r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r0, #24
+; CHECK-THUMB-NEXT:    lsrs r0, r0, #27
+; CHECK-THUMB-NEXT:    adr r1, .LCPI5_1
+; CHECK-THUMB-NEXT:    ldrb r0, [r1, r0]
+; CHECK-THUMB-NEXT:    bx lr
+; CHECK-THUMB-NEXT:  .LBB5_2:
+; CHECK-THUMB-NEXT:    movs r0, #32
 ; CHECK-THUMB-NEXT:    bx lr
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.1:
+; CHECK-THUMB-NEXT:  @ %bb.3:
 ; CHECK-THUMB-NEXT:  .LCPI5_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI5_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI5_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI5_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
   ret i16 %tmp
 }
@@ -326,35 +269,26 @@ define i32 @test_i32_zero_undef(i32 %a) {
 ;
 ; CHECK-THUMB-LABEL: test_i32_zero_undef:
 ; CHECK-THUMB:       @ %bb.0:
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI6_0
-; CHECK-THUMB-NEXT:    ands r2, r0
-; CHECK-THUMB-NEXT:    subs r0, r1, r2
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI6_1
-; CHECK-THUMB-NEXT:    lsrs r2, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r1
-; CHECK-THUMB-NEXT:    ands r2, r1
-; CHECK-THUMB-NEXT:    adds r0, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ldr r1, .LCPI6_2
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB6_2
+; CHECK-THUMB-NEXT:  @ %bb.1:
+; CHECK-THUMB-NEXT:    rsbs r1, r0, #0
 ; CHECK-THUMB-NEXT:    ands r1, r0
-; CHECK-THUMB-NEXT:    ldr r0, .LCPI6_3
+; CHECK-THUMB-NEXT:    ldr r0, .LCPI6_0
 ; CHECK-THUMB-NEXT:    muls r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r0, #24
+; CHECK-THUMB-NEXT:    lsrs r0, r0, #27
+; CHECK-THUMB-NEXT:    adr r1, .LCPI6_1
+; CHECK-THUMB-NEXT:    ldrb r0, [r1, r0]
+; CHECK-THUMB-NEXT:    bx lr
+; CHECK-THUMB-NEXT:  .LBB6_2:
+; CHECK-THUMB-NEXT:    movs r0, #32
 ; CHECK-THUMB-NEXT:    bx lr
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.1:
+; CHECK-THUMB-NEXT:  @ %bb.3:
 ; CHECK-THUMB-NEXT:  .LCPI6_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI6_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI6_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI6_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
   ret i32 %tmp
 }
@@ -377,56 +311,47 @@ define i64 @test_i64_zero_undef(i64 %a) {
 ; CHECK-THUMB-NEXT:    .save {r4, r5, r7, lr}
 ; CHECK-THUMB-NEXT:    push {r4, r5, r7, lr}
 ; CHECK-THUMB-NEXT:    ldr r5, .LCPI7_0
-; CHECK-THUMB-NEXT:    ldr r4, .LCPI7_1
-; CHECK-THUMB-NEXT:    ldr r3, .LCPI7_2
-; CHECK-THUMB-NEXT:    ldr r2, .LCPI7_3
+; CHECK-THUMB-NEXT:    adr r4, .LCPI7_1
+; CHECK-THUMB-NEXT:    movs r3, #32
 ; CHECK-THUMB-NEXT:    cmp r0, #0
-; CHECK-THUMB-NEXT:    bne .LBB7_2
+; CHECK-THUMB-NEXT:    mov r2, r3
+; CHECK-THUMB-NEXT:    bne .LBB7_5
 ; CHECK-THUMB-NEXT:  @ %bb.1:
-; CHECK-THUMB-NEXT:    subs r0, r1, #1
-; CHECK-THUMB-NEXT:    bics r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #1
-; CHECK-THUMB-NEXT:    ands r1, r5
-; CHECK-THUMB-NEXT:    subs r0, r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r4
-; CHECK-THUMB-NEXT:    ands r1, r4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ands r0, r3
-; CHECK-THUMB-NEXT:    muls r2, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r0, r2, #24
-; CHECK-THUMB-NEXT:    adds r0, #32
-; CHECK-THUMB-NEXT:    movs r1, #0
-; CHECK-THUMB-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-THUMB-NEXT:    cmp r1, #0
+; CHECK-THUMB-NEXT:    bne .LBB7_6
 ; CHECK-THUMB-NEXT:  .LBB7_2:
-; CHECK-THUMB-NEXT:    subs r1, r0, #1
-; CHECK-THUMB-NEXT:    bics r1, r0
-; CHECK-THUMB-NEXT:    lsrs r0, r1, #1
-; CHECK-THUMB-NEXT:    ands r0, r5
-; CHECK-THUMB-NEXT:    subs r0, r1, r0
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #2
-; CHECK-THUMB-NEXT:    ands r0, r4
-; CHECK-THUMB-NEXT:    ands r1, r4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    lsrs r1, r0, #4
-; CHECK-THUMB-NEXT:    adds r0, r0, r1
-; CHECK-THUMB-NEXT:    ands r0, r3
-; CHECK-THUMB-NEXT:    muls r2, r0, r2
-; CHECK-THUMB-NEXT:    lsrs r0, r2, #24
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    bne .LBB7_4
+; CHECK-THUMB-NEXT:  .LBB7_3:
+; CHECK-THUMB-NEXT:    adds r3, #32
+; CHECK-THUMB-NEXT:    mov r2, r3
+; CHECK-THUMB-NEXT:  .LBB7_4:
 ; CHECK-THUMB-NEXT:    movs r1, #0
+; CHECK-THUMB-NEXT:    mov r0, r2
 ; CHECK-THUMB-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-THUMB-NEXT:  .LBB7_5:
+; CHECK-THUMB-NEXT:    rsbs r2, r0, #0
+; CHECK-THUMB-NEXT:    ands r2, r0
+; CHECK-THUMB-NEXT:    muls r2, r5, r2
+; CHECK-THUMB-NEXT:    lsrs r2, r2, #27
+; CHECK-THUMB-NEXT:    ldrb r2, [r4, r2]
+; CHECK-THUMB-NEXT:    cmp r1, #0
+; CHECK-THUMB-NEXT:    beq .LBB7_2
+; CHECK-THUMB-NEXT:  .LBB7_6:
+; CHECK-THUMB-NEXT:    rsbs r3, r1, #0
+; CHECK-THUMB-NEXT:    ands r3, r1
+; CHECK-THUMB-NEXT:    muls r5, r3, r5
+; CHECK-THUMB-NEXT:    lsrs r1, r5, #27
+; CHECK-THUMB-NEXT:    ldrb r3, [r4, r1]
+; CHECK-THUMB-NEXT:    cmp r0, #0
+; CHECK-THUMB-NEXT:    beq .LBB7_3
+; CHECK-THUMB-NEXT:    b .LBB7_4
 ; CHECK-THUMB-NEXT:    .p2align 2
-; CHECK-THUMB-NEXT:  @ %bb.3:
+; CHECK-THUMB-NEXT:  @ %bb.7:
 ; CHECK-THUMB-NEXT:  .LCPI7_0:
-; CHECK-THUMB-NEXT:    .long 1431655765 @ 0x55555555
+; CHECK-THUMB-NEXT:    .long 125613361 @ 0x77cb531
 ; CHECK-THUMB-NEXT:  .LCPI7_1:
-; CHECK-THUMB-NEXT:    .long 858993459 @ 0x33333333
-; CHECK-THUMB-NEXT:  .LCPI7_2:
-; CHECK-THUMB-NEXT:    .long 252645135 @ 0xf0f0f0f
-; CHECK-THUMB-NEXT:  .LCPI7_3:
-; CHECK-THUMB-NEXT:    .long 16843009 @ 0x1010101
+; CHECK-THUMB-NEXT:    .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t"
   %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
   ret i64 %tmp
 }

diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index f2496dddced3b..390e3d9b3f5d1 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -170,139 +170,112 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a0, .LBB2_2
+; RV32I-NEXT:    beqz a0, .LBB2_4
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    and a0, s0, a0
+; RV32I-NEXT:    lui a1, 30667
+; RV32I-NEXT:    addi a1, a1, 1329
 ; RV32I-NEXT:    call __mulsi3 at plt
-; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    li a0, 32
+; RV32I-NEXT:    beqz s0, .LBB2_3
+; RV32I-NEXT:  # %bb.2: # %cond.false
+; RV32I-NEXT:    srli a0, a1, 27
+; RV32I-NEXT:    lui a1, %hi(.LCPI2_0)
+; RV32I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:  .LBB2_3: # %cond.false
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:  .LBB2_4:
 ; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_cttz_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    beqz a1, .LBB2_2
-; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addiw a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sext.w s0, a0
+; RV64I-NEXT:    beqz s0, .LBB2_3
+; RV64I-NEXT:  # %bb.1: # %cond.false
+; RV64I-NEXT:    neg a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    lui a1, 30667
+; RV64I-NEXT:    addiw a1, a1, 1329
 ; RV64I-NEXT:    call __muldi3 at plt
-; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:    beqz s0, .LBB2_4
+; RV64I-NEXT:  # %bb.2: # %cond.false
+; RV64I-NEXT:    srliw a0, a1, 27
+; RV64I-NEXT:    lui a1, %hi(.LCPI2_0)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    j .LBB2_4
+; RV64I-NEXT:  .LBB2_3:
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:  .LBB2_4: # %cond.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB2_2:
-; RV64I-NEXT:    li a0, 32
-; RV64I-NEXT:    ret
 ;
 ; RV32M-LABEL: test_cttz_i32:
 ; RV32M:       # %bb.0:
-; RV32M-NEXT:    beqz a0, .LBB2_2
+; RV32M-NEXT:    beqz a0, .LBB2_4
 ; RV32M-NEXT:  # %bb.1: # %cond.false
-; RV32M-NEXT:    addi a1, a0, -1
-; RV32M-NEXT:    not a0, a0
-; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    srli a1, a0, 1
-; RV32M-NEXT:    lui a2, 349525
-; RV32M-NEXT:    addi a2, a2, 1365
-; RV32M-NEXT:    and a1, a1, a2
-; RV32M-NEXT:    sub a0, a0, a1
-; RV32M-NEXT:    lui a1, 209715
-; RV32M-NEXT:    addi a1, a1, 819
-; RV32M-NEXT:    and a2, a0, a1
-; RV32M-NEXT:    srli a0, a0, 2
-; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    add a0, a2, a0
-; RV32M-NEXT:    srli a1, a0, 4
-; RV32M-NEXT:    add a0, a0, a1
-; RV32M-NEXT:    lui a1, 61681
-; RV32M-NEXT:    addi a1, a1, -241
-; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    lui a1, 4112
-; RV32M-NEXT:    addi a1, a1, 257
+; RV32M-NEXT:    mv a1, a0
+; RV32M-NEXT:    li a0, 32
+; RV32M-NEXT:    beqz a1, .LBB2_3
+; RV32M-NEXT:  # %bb.2: # %cond.false
+; RV32M-NEXT:    neg a0, a1
+; RV32M-NEXT:    and a0, a1, a0
+; RV32M-NEXT:    lui a1, 30667
+; RV32M-NEXT:    addi a1, a1, 1329
 ; RV32M-NEXT:    mul a0, a0, a1
-; RV32M-NEXT:    srli a0, a0, 24
+; RV32M-NEXT:    srli a0, a0, 27
+; RV32M-NEXT:    lui a1, %hi(.LCPI2_0)
+; RV32M-NEXT:    addi a1, a1, %lo(.LCPI2_0)
+; RV32M-NEXT:    add a0, a1, a0
+; RV32M-NEXT:    lbu a0, 0(a0)
+; RV32M-NEXT:  .LBB2_3: # %cond.end
 ; RV32M-NEXT:    ret
-; RV32M-NEXT:  .LBB2_2:
+; RV32M-NEXT:  .LBB2_4:
 ; RV32M-NEXT:    li a0, 32
 ; RV32M-NEXT:    ret
 ;
 ; RV64M-LABEL: test_cttz_i32:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    sext.w a1, a0
-; RV64M-NEXT:    beqz a1, .LBB2_2
+; RV64M-NEXT:    sext.w a2, a0
+; RV64M-NEXT:    beqz a2, .LBB2_4
 ; RV64M-NEXT:  # %bb.1: # %cond.false
-; RV64M-NEXT:    addiw a1, a0, -1
-; RV64M-NEXT:    not a0, a0
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    srli a1, a0, 1
-; RV64M-NEXT:    lui a2, 349525
-; RV64M-NEXT:    addiw a2, a2, 1365
-; RV64M-NEXT:    and a1, a1, a2
-; RV64M-NEXT:    sub a0, a0, a1
-; RV64M-NEXT:    lui a1, 209715
-; RV64M-NEXT:    addiw a1, a1, 819
-; RV64M-NEXT:    and a2, a0, a1
-; RV64M-NEXT:    srli a0, a0, 2
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    add a0, a2, a0
-; RV64M-NEXT:    srli a1, a0, 4
-; RV64M-NEXT:    add a0, a0, a1
-; RV64M-NEXT:    lui a1, 61681
-; RV64M-NEXT:    addiw a1, a1, -241
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    lui a1, 4112
-; RV64M-NEXT:    addiw a1, a1, 257
+; RV64M-NEXT:    mv a1, a0
+; RV64M-NEXT:    li a0, 32
+; RV64M-NEXT:    beqz a2, .LBB2_3
+; RV64M-NEXT:  # %bb.2: # %cond.false
+; RV64M-NEXT:    neg a0, a1
+; RV64M-NEXT:    and a0, a1, a0
+; RV64M-NEXT:    lui a1, 30667
+; RV64M-NEXT:    addiw a1, a1, 1329
 ; RV64M-NEXT:    mulw a0, a0, a1
-; RV64M-NEXT:    srliw a0, a0, 24
+; RV64M-NEXT:    srliw a0, a0, 27
+; RV64M-NEXT:    lui a1, %hi(.LCPI2_0)
+; RV64M-NEXT:    addi a1, a1, %lo(.LCPI2_0)
+; RV64M-NEXT:    add a0, a1, a0
+; RV64M-NEXT:    lbu a0, 0(a0)
+; RV64M-NEXT:  .LBB2_3: # %cond.end
 ; RV64M-NEXT:    ret
-; RV64M-NEXT:  .LBB2_2:
+; RV64M-NEXT:  .LBB2_4:
 ; RV64M-NEXT:    li a0, 32
 ; RV64M-NEXT:    ret
 ;
@@ -330,56 +303,39 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv s2, a0
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    not a1, s2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi s4, a2, 1365
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s5, a1, 819
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi s6, a1, -241
-; RV32I-NEXT:    and a0, a0, s6
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi s3, a1, 257
-; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3 at plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a0, s1, -1
-; RV32I-NEXT:    not a1, s1
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    and a0, a0, s6
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    and a0, s0, a0
+; RV32I-NEXT:    lui a1, 30667
+; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    call __mulsi3 at plt
-; RV32I-NEXT:    bnez s2, .LBB3_2
+; RV32I-NEXT:    lui a1, %hi(.LCPI3_0)
+; RV32I-NEXT:    addi s5, a1, %lo(.LCPI3_0)
+; RV32I-NEXT:    li s4, 32
+; RV32I-NEXT:    li s2, 32
+; RV32I-NEXT:    beqz s0, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB3_3
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, s5, a0
+; RV32I-NEXT:    lbu s2, 0(a0)
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    srli a0, s0, 24
-; RV32I-NEXT:  .LBB3_3:
+; RV32I-NEXT:    neg a0, s1
+; RV32I-NEXT:    and a0, s1, a0
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    call __mulsi3 at plt
+; RV32I-NEXT:    beqz s1, .LBB3_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, s5, a0
+; RV32I-NEXT:    lbu s4, 0(a0)
+; RV32I-NEXT:  .LBB3_4:
+; RV32I-NEXT:    bnez s0, .LBB3_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    addi s2, s4, 32
+; RV32I-NEXT:  .LBB3_6:
+; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -388,124 +344,98 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_cttz_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a0, .LBB3_2
+; RV64I-NEXT:    beqz a0, .LBB3_4
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI3_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI3_1)(a2)
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    and a1, a0, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI3_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI3_2)(a2)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI3_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI3_3)(a1)
 ; RV64I-NEXT:    call __muldi3 at plt
-; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 64
+; RV64I-NEXT:    beqz s0, .LBB3_3
+; RV64I-NEXT:  # %bb.2: # %cond.false
+; RV64I-NEXT:    srli a0, a1, 58
+; RV64I-NEXT:    lui a1, %hi(.LCPI3_1)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI3_1)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:  .LBB3_3: # %cond.false
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:  .LBB3_4:
 ; RV64I-NEXT:    li a0, 64
 ; RV64I-NEXT:    ret
 ;
 ; RV32M-LABEL: test_cttz_i64:
 ; RV32M:       # %bb.0:
-; RV32M-NEXT:    lui a2, 349525
-; RV32M-NEXT:    addi a5, a2, 1365
-; RV32M-NEXT:    lui a2, 209715
-; RV32M-NEXT:    addi a4, a2, 819
-; RV32M-NEXT:    lui a2, 61681
-; RV32M-NEXT:    addi a2, a2, -241
-; RV32M-NEXT:    lui a3, 4112
-; RV32M-NEXT:    addi a3, a3, 257
-; RV32M-NEXT:    bnez a0, .LBB3_2
+; RV32M-NEXT:    lui a2, 30667
+; RV32M-NEXT:    addi a4, a2, 1329
+; RV32M-NEXT:    lui a2, %hi(.LCPI3_0)
+; RV32M-NEXT:    addi a5, a2, %lo(.LCPI3_0)
+; RV32M-NEXT:    li a3, 32
+; RV32M-NEXT:    li a2, 32
+; RV32M-NEXT:    bnez a0, .LBB3_5
 ; RV32M-NEXT:  # %bb.1:
-; RV32M-NEXT:    addi a0, a1, -1
-; RV32M-NEXT:    not a1, a1
-; RV32M-NEXT:    and a0, a1, a0
-; RV32M-NEXT:    srli a1, a0, 1
-; RV32M-NEXT:    and a1, a1, a5
-; RV32M-NEXT:    sub a0, a0, a1
-; RV32M-NEXT:    and a1, a0, a4
-; RV32M-NEXT:    srli a0, a0, 2
-; RV32M-NEXT:    and a0, a0, a4
-; RV32M-NEXT:    add a0, a1, a0
-; RV32M-NEXT:    srli a1, a0, 4
-; RV32M-NEXT:    add a0, a0, a1
-; RV32M-NEXT:    and a0, a0, a2
-; RV32M-NEXT:    mul a0, a0, a3
-; RV32M-NEXT:    srli a0, a0, 24
-; RV32M-NEXT:    addi a0, a0, 32
-; RV32M-NEXT:    li a1, 0
-; RV32M-NEXT:    ret
+; RV32M-NEXT:    bnez a1, .LBB3_6
 ; RV32M-NEXT:  .LBB3_2:
-; RV32M-NEXT:    addi a1, a0, -1
-; RV32M-NEXT:    not a0, a0
-; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    srli a1, a0, 1
-; RV32M-NEXT:    and a1, a1, a5
-; RV32M-NEXT:    sub a0, a0, a1
-; RV32M-NEXT:    and a1, a0, a4
-; RV32M-NEXT:    srli a0, a0, 2
-; RV32M-NEXT:    and a0, a0, a4
-; RV32M-NEXT:    add a0, a1, a0
-; RV32M-NEXT:    srli a1, a0, 4
-; RV32M-NEXT:    add a0, a0, a1
-; RV32M-NEXT:    and a0, a0, a2
-; RV32M-NEXT:    mul a0, a0, a3
-; RV32M-NEXT:    srli a0, a0, 24
+; RV32M-NEXT:    bnez a0, .LBB3_4
+; RV32M-NEXT:  .LBB3_3:
+; RV32M-NEXT:    addi a2, a3, 32
+; RV32M-NEXT:  .LBB3_4:
+; RV32M-NEXT:    mv a0, a2
 ; RV32M-NEXT:    li a1, 0
 ; RV32M-NEXT:    ret
+; RV32M-NEXT:  .LBB3_5:
+; RV32M-NEXT:    neg a2, a0
+; RV32M-NEXT:    and a2, a0, a2
+; RV32M-NEXT:    mul a2, a2, a4
+; RV32M-NEXT:    srli a2, a2, 27
+; RV32M-NEXT:    add a2, a5, a2
+; RV32M-NEXT:    lbu a2, 0(a2)
+; RV32M-NEXT:    beqz a1, .LBB3_2
+; RV32M-NEXT:  .LBB3_6:
+; RV32M-NEXT:    neg a3, a1
+; RV32M-NEXT:    and a1, a1, a3
+; RV32M-NEXT:    mul a1, a1, a4
+; RV32M-NEXT:    srli a1, a1, 27
+; RV32M-NEXT:    add a1, a5, a1
+; RV32M-NEXT:    lbu a3, 0(a1)
+; RV32M-NEXT:    beqz a0, .LBB3_3
+; RV32M-NEXT:    j .LBB3_4
 ;
 ; RV64M-LABEL: test_cttz_i64:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    beqz a0, .LBB3_2
+; RV64M-NEXT:    beqz a0, .LBB3_4
 ; RV64M-NEXT:  # %bb.1: # %cond.false
-; RV64M-NEXT:    addi a1, a0, -1
-; RV64M-NEXT:    not a0, a0
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    lui a1, %hi(.LCPI3_0)
-; RV64M-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
-; RV64M-NEXT:    lui a2, %hi(.LCPI3_1)
-; RV64M-NEXT:    ld a2, %lo(.LCPI3_1)(a2)
-; RV64M-NEXT:    srli a3, a0, 1
-; RV64M-NEXT:    and a1, a3, a1
-; RV64M-NEXT:    sub a0, a0, a1
-; RV64M-NEXT:    and a1, a0, a2
-; RV64M-NEXT:    srli a0, a0, 2
-; RV64M-NEXT:    and a0, a0, a2
+; RV64M-NEXT:    mv a1, a0
+; RV64M-NEXT:    li a0, 64
+; RV64M-NEXT:    beqz a1, .LBB3_3
+; RV64M-NEXT:  # %bb.2: # %cond.false
+; RV64M-NEXT:    lui a0, %hi(.LCPI3_0)
+; RV64M-NEXT:    ld a0, %lo(.LCPI3_0)(a0)
+; RV64M-NEXT:    neg a2, a1
+; RV64M-NEXT:    and a1, a1, a2
+; RV64M-NEXT:    mul a0, a1, a0
+; RV64M-NEXT:    srli a0, a0, 58
+; RV64M-NEXT:    lui a1, %hi(.LCPI3_1)
+; RV64M-NEXT:    addi a1, a1, %lo(.LCPI3_1)
 ; RV64M-NEXT:    add a0, a1, a0
-; RV64M-NEXT:    lui a1, %hi(.LCPI3_2)
-; RV64M-NEXT:    ld a1, %lo(.LCPI3_2)(a1)
-; RV64M-NEXT:    lui a2, %hi(.LCPI3_3)
-; RV64M-NEXT:    ld a2, %lo(.LCPI3_3)(a2)
-; RV64M-NEXT:    srli a3, a0, 4
-; RV64M-NEXT:    add a0, a0, a3
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    mul a0, a0, a2
-; RV64M-NEXT:    srli a0, a0, 56
+; RV64M-NEXT:    lbu a0, 0(a0)
+; RV64M-NEXT:  .LBB3_3: # %cond.end
 ; RV64M-NEXT:    ret
-; RV64M-NEXT:  .LBB3_2:
+; RV64M-NEXT:  .LBB3_4:
 ; RV64M-NEXT:    li a0, 64
 ; RV64M-NEXT:    ret
 ;
@@ -645,30 +575,25 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    and a0, s0, a0
+; RV32I-NEXT:    lui a1, 30667
+; RV32I-NEXT:    addi a1, a1, 1329
 ; RV32I-NEXT:    call __mulsi3 at plt
-; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    li a0, 32
+; RV32I-NEXT:    beqz s0, .LBB6_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    srli a0, a1, 27
+; RV32I-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV32I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:  .LBB6_2:
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
@@ -676,85 +601,65 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addiw a1, a0, -1
-; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sext.w s0, a0
+; RV64I-NEXT:    neg a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    lui a1, 30667
+; RV64I-NEXT:    addiw a1, a1, 1329
 ; RV64I-NEXT:    call __muldi3 at plt
-; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:    beqz s0, .LBB6_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    srliw a0, a1, 27
+; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:  .LBB6_2:
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV32M-LABEL: test_cttz_i32_zero_undef:
 ; RV32M:       # %bb.0:
-; RV32M-NEXT:    addi a1, a0, -1
-; RV32M-NEXT:    not a0, a0
-; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    srli a1, a0, 1
-; RV32M-NEXT:    lui a2, 349525
-; RV32M-NEXT:    addi a2, a2, 1365
-; RV32M-NEXT:    and a1, a1, a2
-; RV32M-NEXT:    sub a0, a0, a1
-; RV32M-NEXT:    lui a1, 209715
-; RV32M-NEXT:    addi a1, a1, 819
-; RV32M-NEXT:    and a2, a0, a1
-; RV32M-NEXT:    srli a0, a0, 2
-; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    add a0, a2, a0
-; RV32M-NEXT:    srli a1, a0, 4
-; RV32M-NEXT:    add a0, a0, a1
-; RV32M-NEXT:    lui a1, 61681
-; RV32M-NEXT:    addi a1, a1, -241
+; RV32M-NEXT:    li a1, 32
+; RV32M-NEXT:    beqz a0, .LBB6_2
+; RV32M-NEXT:  # %bb.1:
+; RV32M-NEXT:    neg a1, a0
 ; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    lui a1, 4112
-; RV32M-NEXT:    addi a1, a1, 257
+; RV32M-NEXT:    lui a1, 30667
+; RV32M-NEXT:    addi a1, a1, 1329
 ; RV32M-NEXT:    mul a0, a0, a1
-; RV32M-NEXT:    srli a0, a0, 24
+; RV32M-NEXT:    srli a0, a0, 27
+; RV32M-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV32M-NEXT:    addi a1, a1, %lo(.LCPI6_0)
+; RV32M-NEXT:    add a0, a1, a0
+; RV32M-NEXT:    lbu a1, 0(a0)
+; RV32M-NEXT:  .LBB6_2:
+; RV32M-NEXT:    mv a0, a1
 ; RV32M-NEXT:    ret
 ;
 ; RV64M-LABEL: test_cttz_i32_zero_undef:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    addiw a1, a0, -1
-; RV64M-NEXT:    not a0, a0
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    srli a1, a0, 1
-; RV64M-NEXT:    lui a2, 349525
-; RV64M-NEXT:    addiw a2, a2, 1365
-; RV64M-NEXT:    and a1, a1, a2
-; RV64M-NEXT:    sub a0, a0, a1
-; RV64M-NEXT:    lui a1, 209715
-; RV64M-NEXT:    addiw a1, a1, 819
-; RV64M-NEXT:    and a2, a0, a1
-; RV64M-NEXT:    srli a0, a0, 2
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    add a0, a2, a0
-; RV64M-NEXT:    srli a1, a0, 4
-; RV64M-NEXT:    add a0, a0, a1
-; RV64M-NEXT:    lui a1, 61681
-; RV64M-NEXT:    addiw a1, a1, -241
+; RV64M-NEXT:    sext.w a2, a0
+; RV64M-NEXT:    li a1, 32
+; RV64M-NEXT:    beqz a2, .LBB6_2
+; RV64M-NEXT:  # %bb.1:
+; RV64M-NEXT:    neg a1, a0
 ; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    lui a1, 4112
-; RV64M-NEXT:    addiw a1, a1, 257
+; RV64M-NEXT:    lui a1, 30667
+; RV64M-NEXT:    addiw a1, a1, 1329
 ; RV64M-NEXT:    mulw a0, a0, a1
-; RV64M-NEXT:    srliw a0, a0, 24
+; RV64M-NEXT:    srliw a0, a0, 27
+; RV64M-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV64M-NEXT:    addi a1, a1, %lo(.LCPI6_0)
+; RV64M-NEXT:    add a0, a1, a0
+; RV64M-NEXT:    lbu a1, 0(a0)
+; RV64M-NEXT:  .LBB6_2:
+; RV64M-NEXT:    mv a0, a1
 ; RV64M-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: test_cttz_i32_zero_undef:
@@ -781,56 +686,39 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv s2, a0
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    not a1, s2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi s4, a2, 1365
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s5, a1, 819
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi s6, a1, -241
-; RV32I-NEXT:    and a0, a0, s6
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi s3, a1, 257
-; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3 at plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a0, s1, -1
-; RV32I-NEXT:    not a1, s1
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    and a0, a0, s6
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    and a0, s0, a0
+; RV32I-NEXT:    lui a1, 30667
+; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    call __mulsi3 at plt
-; RV32I-NEXT:    bnez s2, .LBB7_2
+; RV32I-NEXT:    lui a1, %hi(.LCPI7_0)
+; RV32I-NEXT:    addi s5, a1, %lo(.LCPI7_0)
+; RV32I-NEXT:    li s4, 32
+; RV32I-NEXT:    li s2, 32
+; RV32I-NEXT:    beqz s0, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB7_3
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, s5, a0
+; RV32I-NEXT:    lbu s2, 0(a0)
 ; RV32I-NEXT:  .LBB7_2:
-; RV32I-NEXT:    srli a0, s0, 24
-; RV32I-NEXT:  .LBB7_3:
+; RV32I-NEXT:    neg a0, s1
+; RV32I-NEXT:    and a0, s1, a0
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    call __mulsi3 at plt
+; RV32I-NEXT:    beqz s1, .LBB7_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, s5, a0
+; RV32I-NEXT:    lbu s4, 0(a0)
+; RV32I-NEXT:  .LBB7_4:
+; RV32I-NEXT:    bnez s0, .LBB7_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    addi s2, s4, 32
+; RV32I-NEXT:  .LBB7_6:
+; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -839,7 +727,6 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;
@@ -847,107 +734,82 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI7_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI7_1)(a2)
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    and a1, a0, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI7_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI7_2)(a2)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI7_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI7_3)(a1)
 ; RV64I-NEXT:    call __muldi3 at plt
-; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 64
+; RV64I-NEXT:    beqz s0, .LBB7_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    srli a0, a1, 58
+; RV64I-NEXT:    lui a1, %hi(.LCPI7_1)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_1)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV32M-LABEL: test_cttz_i64_zero_undef:
 ; RV32M:       # %bb.0:
-; RV32M-NEXT:    lui a2, 349525
-; RV32M-NEXT:    addi a5, a2, 1365
-; RV32M-NEXT:    lui a2, 209715
-; RV32M-NEXT:    addi a4, a2, 819
-; RV32M-NEXT:    lui a2, 61681
-; RV32M-NEXT:    addi a2, a2, -241
-; RV32M-NEXT:    lui a3, 4112
-; RV32M-NEXT:    addi a3, a3, 257
-; RV32M-NEXT:    bnez a0, .LBB7_2
+; RV32M-NEXT:    lui a2, 30667
+; RV32M-NEXT:    addi a4, a2, 1329
+; RV32M-NEXT:    lui a2, %hi(.LCPI7_0)
+; RV32M-NEXT:    addi a5, a2, %lo(.LCPI7_0)
+; RV32M-NEXT:    li a3, 32
+; RV32M-NEXT:    li a2, 32
+; RV32M-NEXT:    bnez a0, .LBB7_5
 ; RV32M-NEXT:  # %bb.1:
-; RV32M-NEXT:    addi a0, a1, -1
-; RV32M-NEXT:    not a1, a1
-; RV32M-NEXT:    and a0, a1, a0
-; RV32M-NEXT:    srli a1, a0, 1
-; RV32M-NEXT:    and a1, a1, a5
-; RV32M-NEXT:    sub a0, a0, a1
-; RV32M-NEXT:    and a1, a0, a4
-; RV32M-NEXT:    srli a0, a0, 2
-; RV32M-NEXT:    and a0, a0, a4
-; RV32M-NEXT:    add a0, a1, a0
-; RV32M-NEXT:    srli a1, a0, 4
-; RV32M-NEXT:    add a0, a0, a1
-; RV32M-NEXT:    and a0, a0, a2
-; RV32M-NEXT:    mul a0, a0, a3
-; RV32M-NEXT:    srli a0, a0, 24
-; RV32M-NEXT:    addi a0, a0, 32
-; RV32M-NEXT:    li a1, 0
-; RV32M-NEXT:    ret
+; RV32M-NEXT:    bnez a1, .LBB7_6
 ; RV32M-NEXT:  .LBB7_2:
-; RV32M-NEXT:    addi a1, a0, -1
-; RV32M-NEXT:    not a0, a0
-; RV32M-NEXT:    and a0, a0, a1
-; RV32M-NEXT:    srli a1, a0, 1
-; RV32M-NEXT:    and a1, a1, a5
-; RV32M-NEXT:    sub a0, a0, a1
-; RV32M-NEXT:    and a1, a0, a4
-; RV32M-NEXT:    srli a0, a0, 2
-; RV32M-NEXT:    and a0, a0, a4
-; RV32M-NEXT:    add a0, a1, a0
-; RV32M-NEXT:    srli a1, a0, 4
-; RV32M-NEXT:    add a0, a0, a1
-; RV32M-NEXT:    and a0, a0, a2
-; RV32M-NEXT:    mul a0, a0, a3
-; RV32M-NEXT:    srli a0, a0, 24
+; RV32M-NEXT:    bnez a0, .LBB7_4
+; RV32M-NEXT:  .LBB7_3:
+; RV32M-NEXT:    addi a2, a3, 32
+; RV32M-NEXT:  .LBB7_4:
+; RV32M-NEXT:    mv a0, a2
 ; RV32M-NEXT:    li a1, 0
 ; RV32M-NEXT:    ret
+; RV32M-NEXT:  .LBB7_5:
+; RV32M-NEXT:    neg a2, a0
+; RV32M-NEXT:    and a2, a0, a2
+; RV32M-NEXT:    mul a2, a2, a4
+; RV32M-NEXT:    srli a2, a2, 27
+; RV32M-NEXT:    add a2, a5, a2
+; RV32M-NEXT:    lbu a2, 0(a2)
+; RV32M-NEXT:    beqz a1, .LBB7_2
+; RV32M-NEXT:  .LBB7_6:
+; RV32M-NEXT:    neg a3, a1
+; RV32M-NEXT:    and a1, a1, a3
+; RV32M-NEXT:    mul a1, a1, a4
+; RV32M-NEXT:    srli a1, a1, 27
+; RV32M-NEXT:    add a1, a5, a1
+; RV32M-NEXT:    lbu a3, 0(a1)
+; RV32M-NEXT:    beqz a0, .LBB7_3
+; RV32M-NEXT:    j .LBB7_4
 ;
 ; RV64M-LABEL: test_cttz_i64_zero_undef:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    addi a1, a0, -1
-; RV64M-NEXT:    not a0, a0
-; RV64M-NEXT:    and a0, a0, a1
+; RV64M-NEXT:    li a1, 64
+; RV64M-NEXT:    beqz a0, .LBB7_2
+; RV64M-NEXT:  # %bb.1:
 ; RV64M-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64M-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
-; RV64M-NEXT:    lui a2, %hi(.LCPI7_1)
-; RV64M-NEXT:    ld a2, %lo(.LCPI7_1)(a2)
-; RV64M-NEXT:    srli a3, a0, 1
-; RV64M-NEXT:    and a1, a3, a1
-; RV64M-NEXT:    sub a0, a0, a1
-; RV64M-NEXT:    and a1, a0, a2
-; RV64M-NEXT:    srli a0, a0, 2
+; RV64M-NEXT:    neg a2, a0
 ; RV64M-NEXT:    and a0, a0, a2
+; RV64M-NEXT:    mul a0, a0, a1
+; RV64M-NEXT:    srli a0, a0, 58
+; RV64M-NEXT:    lui a1, %hi(.LCPI7_1)
+; RV64M-NEXT:    addi a1, a1, %lo(.LCPI7_1)
 ; RV64M-NEXT:    add a0, a1, a0
-; RV64M-NEXT:    lui a1, %hi(.LCPI7_2)
-; RV64M-NEXT:    ld a1, %lo(.LCPI7_2)(a1)
-; RV64M-NEXT:    lui a2, %hi(.LCPI7_3)
-; RV64M-NEXT:    ld a2, %lo(.LCPI7_3)(a2)
-; RV64M-NEXT:    srli a3, a0, 4
-; RV64M-NEXT:    add a0, a0, a3
-; RV64M-NEXT:    and a0, a0, a1
-; RV64M-NEXT:    mul a0, a0, a2
-; RV64M-NEXT:    srli a0, a0, 56
+; RV64M-NEXT:    lbu a1, 0(a0)
+; RV64M-NEXT:  .LBB7_2:
+; RV64M-NEXT:    mv a0, a1
 ; RV64M-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: test_cttz_i64_zero_undef:

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 8a077c870b774..ddae369e1a674 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -171,37 +171,32 @@ declare i32 @llvm.cttz.i32(i32, i1)
 define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: cttz_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a0, .LBB2_2
+; RV32I-NEXT:    beqz a0, .LBB2_4
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    and a0, s0, a0
+; RV32I-NEXT:    lui a1, 30667
+; RV32I-NEXT:    addi a1, a1, 1329
 ; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    li a0, 32
+; RV32I-NEXT:    beqz s0, .LBB2_3
+; RV32I-NEXT:  # %bb.2: # %cond.false
+; RV32I-NEXT:    srli a0, a1, 27
+; RV32I-NEXT:    lui a1, %hi(.LCPI2_0)
+; RV32I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    lbu a0, 0(a0)
+; RV32I-NEXT:  .LBB2_3: # %cond.false
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:  .LBB2_4:
 ; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
@@ -226,56 +221,39 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv s2, a0
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    not a1, s2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi s4, a2, 1365
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s5, a1, 819
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi s6, a1, -241
-; RV32I-NEXT:    and a0, a0, s6
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi s3, a1, 257
-; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a0, s1, -1
-; RV32I-NEXT:    not a1, s1
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    and a0, a0, s6
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    and a0, s0, a0
+; RV32I-NEXT:    lui a1, 30667
+; RV32I-NEXT:    addi s3, a1, 1329
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    bnez s2, .LBB3_2
+; RV32I-NEXT:    lui a1, %hi(.LCPI3_0)
+; RV32I-NEXT:    addi s5, a1, %lo(.LCPI3_0)
+; RV32I-NEXT:    li s4, 32
+; RV32I-NEXT:    li s2, 32
+; RV32I-NEXT:    beqz s0, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB3_3
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, s5, a0
+; RV32I-NEXT:    lbu s2, 0(a0)
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    srli a0, s0, 24
-; RV32I-NEXT:  .LBB3_3:
+; RV32I-NEXT:    neg a0, s1
+; RV32I-NEXT:    and a0, s1, a0
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    beqz s1, .LBB3_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    add a0, s5, a0
+; RV32I-NEXT:    lbu s4, 0(a0)
+; RV32I-NEXT:  .LBB3_4:
+; RV32I-NEXT:    bnez s0, .LBB3_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    addi s2, s4, 32
+; RV32I-NEXT:  .LBB3_6:
+; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -284,7 +262,6 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 32
 ; RV32I-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index ca3f3f331cc51..11050ecefc693 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -368,40 +368,34 @@ declare i32 @llvm.cttz.i32(i32, i1)
 define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: cttz_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    beqz a1, .LBB6_2
-; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addiw a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sext.w s0, a0
+; RV64I-NEXT:    beqz s0, .LBB6_3
+; RV64I-NEXT:  # %bb.1: # %cond.false
+; RV64I-NEXT:    neg a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    lui a1, 30667
+; RV64I-NEXT:    addiw a1, a1, 1329
 ; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:    beqz s0, .LBB6_4
+; RV64I-NEXT:  # %bb.2: # %cond.false
+; RV64I-NEXT:    srliw a0, a1, 27
+; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:    j .LBB6_4
+; RV64I-NEXT:  .LBB6_3:
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:  .LBB6_4: # %cond.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB6_2:
-; RV64I-NEXT:    li a0, 32
-; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: cttz_i32:
 ; RV64ZBB:       # %bb.0:
@@ -416,30 +410,25 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addiw a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, s0, a0
+; RV64I-NEXT:    lui a1, 30667
+; RV64I-NEXT:    addiw a1, a1, 1329
 ; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:    beqz s0, .LBB7_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    srliw a0, a1, 27
+; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_0)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
@@ -458,34 +447,25 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    not a1, s0
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, s0, a0
+; RV64I-NEXT:    lui a1, 30667
+; RV64I-NEXT:    addiw a1, a1, 1329
 ; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    li a0, -1
+; RV64I-NEXT:    li a1, 32
 ; RV64I-NEXT:    beqz s0, .LBB8_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    srliw a0, a1, 24
+; RV64I-NEXT:    srliw a0, a0, 27
+; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a1, 0(a0)
 ; RV64I-NEXT:  .LBB8_2:
+; RV64I-NEXT:    li a0, -1
+; RV64I-NEXT:    beqz s0, .LBB8_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB8_4:
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -509,42 +489,35 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ffs_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    not a1, s0
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    li s0, 0
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, s1, a0
+; RV64I-NEXT:    lui a1, 30667
+; RV64I-NEXT:    addiw a1, a1, 1329
 ; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    li a0, 0
-; RV64I-NEXT:    beqz s0, .LBB9_2
+; RV64I-NEXT:    li a1, 32
+; RV64I-NEXT:    beqz s1, .LBB9_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    srliw a0, a1, 24
-; RV64I-NEXT:    addi a0, a0, 1
+; RV64I-NEXT:    srliw a0, a0, 27
+; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a1, 0(a0)
 ; RV64I-NEXT:  .LBB9_2:
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    beqz s1, .LBB9_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    addi s0, a1, 1
+; RV64I-NEXT:  .LBB9_4:
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: ffs_i32:
@@ -569,37 +542,32 @@ declare i64 @llvm.cttz.i64(i64, i1)
 define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-LABEL: cttz_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a0, .LBB10_2
+; RV64I-NEXT:    beqz a0, .LBB10_4
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, s0, a0
 ; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI10_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI10_1)(a2)
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    and a1, a0, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI10_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI10_2)(a2)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI10_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI10_3)(a1)
 ; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 64
+; RV64I-NEXT:    beqz s0, .LBB10_3
+; RV64I-NEXT:  # %bb.2: # %cond.false
+; RV64I-NEXT:    srli a0, a1, 58
+; RV64I-NEXT:    lui a1, %hi(.LCPI10_1)
+; RV64I-NEXT:    addi a1, a1, %lo(.LCPI10_1)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lbu a0, 0(a0)
+; RV64I-NEXT:  .LBB10_3: # %cond.false
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB10_2:
+; RV64I-NEXT:  .LBB10_4:
 ; RV64I-NEXT:    li a0, 64
 ; RV64I-NEXT:    ret
 ;

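The findFirstSet and ffs diffs above wrap the same lookup in the usual libc semantics. As a hedged sketch reusing the cttz32 helper from the earlier example (the wrapper names are mine):

  // findFirstSet returns -1 for a zero input; ffs returns 0 for zero and a
  // 1-based bit index otherwise, matching the "li a0, -1" and "li s0, 0"
  // defaults and the beqz branches in the rv64zbb checks above.
  int32_t findFirstSet32(uint32_t X) { return X ? int32_t(cttz32(X)) : -1; }
  int32_t ffs32(uint32_t X) { return X ? int32_t(cttz32(X)) + 1 : 0; }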
diff  --git a/llvm/test/CodeGen/SPARC/cttz.ll b/llvm/test/CodeGen/SPARC/cttz.ll
new file mode 100644
index 0000000000000..fad74b7f5c71a
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/cttz.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -march=sparc -mcpu=v9 | FileCheck %s
+
+define i32 @f(i32 %x) {
+; CHECK-LABEL: f:
+; CHECK:         .cfi_startproc
+; CHECK-NEXT:  ! %bb.0: ! %entry
+; CHECK-NEXT:    mov %g0, %o1
+; CHECK-NEXT:    sub %o1, %o0, %o1
+; CHECK-NEXT:    and %o0, %o1, %o1
+; CHECK-NEXT:    sethi 122669, %o2
+; CHECK-NEXT:    or %o2, 305, %o2
+; CHECK-NEXT:    smul %o1, %o2, %o1
+; CHECK-NEXT:    srl %o1, 27, %o1
+; CHECK-NEXT:    sethi %hi(.LCPI0_0), %o2
+; CHECK-NEXT:    add %o2, %lo(.LCPI0_0), %o2
+; CHECK-NEXT:    ldub [%o2+%o1], %o1
+; CHECK-NEXT:    cmp %o0, 0
+; CHECK-NEXT:    move %icc, 32, %o1
+; CHECK-NEXT:    move %icc, 0, %o1
+; CHECK-NEXT:    retl
+; CHECK-NEXT:    mov %o1, %o0
+entry:
+  %0 = call i32 @llvm.cttz.i32(i32 %x, i1 true)
+  %1 = icmp eq i32 %x, 0
+  %2 = select i1 %1, i32 0, i32 %0
+  %3 = trunc i32 %2 to i8
+  %conv = zext i8 %3 to i32
+  ret i32 %conv
+}
+
+define i64 @g(i64 %x) {
+; CHECK-LABEL: g:
+; CHECK:         .cfi_startproc
+; CHECK-NEXT:  ! %bb.0: ! %entry
+; CHECK-NEXT:    mov %g0, %o2
+; CHECK-NEXT:    sub %o2, %o1, %o3
+; CHECK-NEXT:    and %o1, %o3, %o3
+; CHECK-NEXT:    sethi 122669, %o4
+; CHECK-NEXT:    or %o4, 305, %o4
+; CHECK-NEXT:    smul %o3, %o4, %o3
+; CHECK-NEXT:    srl %o3, 27, %o3
+; CHECK-NEXT:    sethi %hi(.LCPI1_0), %o5
+; CHECK-NEXT:    add %o5, %lo(.LCPI1_0), %o5
+; CHECK-NEXT:    ldub [%o5+%o3], %g2
+; CHECK-NEXT:    sub %o2, %o0, %o3
+; CHECK-NEXT:    and %o0, %o3, %o3
+; CHECK-NEXT:    smul %o3, %o4, %o3
+; CHECK-NEXT:    srl %o3, 27, %o3
+; CHECK-NEXT:    ldub [%o5+%o3], %o3
+; CHECK-NEXT:    cmp %o1, 0
+; CHECK-NEXT:    move %icc, 32, %g2
+; CHECK-NEXT:    cmp %o0, 0
+; CHECK-NEXT:    move %icc, 32, %o3
+; CHECK-NEXT:    add %o3, 32, %o3
+; CHECK-NEXT:    cmp %o1, 0
+; CHECK-NEXT:    movne %icc, %g2, %o3
+; CHECK-NEXT:    or %o1, %o0, %o0
+; CHECK-NEXT:    cmp %o0, 0
+; CHECK-NEXT:    move %icc, 0, %o3
+; CHECK-NEXT:    mov %o2, %o0
+; CHECK-NEXT:    retl
+; CHECK-NEXT:    mov %o3, %o1
+entry:
+  %0 = call i64 @llvm.cttz.i64(i64 %x, i1 true)
+  %1 = icmp eq i64 %x, 0
+  %2 = select i1 %1, i64 0, i64 %0
+  %3 = trunc i64 %2 to i32
+  %conv = zext i32 %3 to i64
+  ret i64 %conv
+}
+
+; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
+declare i32 @llvm.cttz.i32(i32, i1 immarg) #0
+declare i64 @llvm.cttz.i64(i64, i1 immarg) #0
+
+attributes #0 = { nocallback nofree nosync nounwind readnone speculatable willreturn }

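The SPARC g() checks above also show how the expansion handles i64 on a 32-bit target: two 32-bit lookups, selecting the low word's count when the low word is nonzero and 32 plus the high word's count otherwise (an all-zero input is still guarded separately, as in the zero_undef tests). Native 64-bit targets instead use a 64-entry table with a shift of 58, visible as "srli a0, a1, 58" in the RV64 output. A hedged sketch of the 32-bit-pair case, again reusing cttz32 from the earlier example:

  // i64 cttz assembled from two 32-bit table lookups, mirroring the branch
  // structure in the RV32 and SPARC output; a fully zero input must be
  // special-cased by the caller (the "li a0, 64" default above).
  uint32_t cttz64(uint32_t Lo, uint32_t Hi) {
    return Lo != 0 ? cttz32(Lo) : 32 + cttz32(Hi);
  }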

More information about the llvm-commits mailing list