[llvm] [RISCV][GISel] Port TrailingOnesMask PatLeaf. (PR #119427)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 10 10:10:54 PST 2024


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/119427

None
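
(The PR carries no description; the following is a hedged summary of the effect, reconstructed from the diff below.) The change mirrors the existing SelectionDAG TrailingOnesMask behavior in GlobalISel: an AND with a constant trailing-ones mask that does not fit in a signed 12-bit immediate is selected as a shift-left/shift-right pair instead of materializing the mask with lui+addi. A minimal sketch, using IR shaped like the zexth_i32 test updated below (the exact test body is paraphrased; the RV32I output is taken from the diff):

  define i32 @zexth_i32(i32 %a) nounwind {
    %b = and i32 %a, 65535     ; trailing-ones mask, does not fit simm12
    ret i32 %b
  }

  ; RV32I with -global-isel, after this patch:
  ;   slli a0, a0, 16          ; shift amount = XLen (32) - 16 trailing ones
  ;   srli a0, a0, 16
  ;   ret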

From 1c836095f8532f9047f49e4d693dbbb98caf67e8 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 10 Dec 2024 10:07:48 -0800
Subject: [PATCH] [RISCV][GISel] Port TrailingOnesMask PatLeaf.

---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  |  10 +
 llvm/lib/Target/RISCV/RISCVInstrInfo.td       |  14 +-
 .../CodeGen/RISCV/GlobalISel/double-arith.ll  |  34 ++-
 .../RISCV/GlobalISel/double-convert.ll        |  20 +-
 .../RISCV/GlobalISel/double-intrinsics.ll     |  38 ++-
 .../CodeGen/RISCV/GlobalISel/float-arith.ll   |  30 +--
 .../CodeGen/RISCV/GlobalISel/float-convert.ll |  20 +-
 .../RISCV/GlobalISel/float-intrinsics.ll      | 240 ++++++++----------
 llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll   |  20 +-
 .../instruction-select/alu-rv32.mir           |   7 +-
 .../CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll  |   5 +-
 llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll |  10 +-
 .../test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll |  25 +-
 llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll |  10 +-
 .../CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll  |   5 +-
 llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll |  10 +-
 .../test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll |  33 +--
 17 files changed, 248 insertions(+), 283 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index d525834ce76c2c..985264c591e105 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -134,6 +134,8 @@ class RISCVInstructionSelector : public InstructionSelector {
 
   void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
+  void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
+                                 const MachineInstr &MI, int OpIdx) const;
 
   const RISCVSubtarget &STI;
   const RISCVInstrInfo &TII;
@@ -861,6 +863,14 @@ void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
   MIB.addImm(llvm::countr_zero(C));
 }
 
+void RISCVInstructionSelector::renderXLenSubTrailingOnes(
+    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
+  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
+         "Expected G_CONSTANT");
+  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
+  MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
+}
+
 const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
     LLT Ty, const RegisterBank &RB) const {
   if (RB.getID() == RISCV::GPRBRegBankID) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 14b571cebe1fec..7cae93e53d7700 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -476,6 +476,8 @@ def XLenSubTrailingOnes : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(XLen - TrailingOnes, SDLoc(N),
                                    N->getValueType(0));
 }]>;
+def GIXLenSubTrailingOnes : GICustomOperandRenderer<"renderXLenSubTrailingOnes">,
+  GISDNodeXFormEquiv<XLenSubTrailingOnes>;
 
 // Checks if this mask is a non-empty sequence of ones starting at the
 // most/least significant bit with the remainder zero and exceeds simm32/simm12.
@@ -489,7 +491,17 @@ def TrailingOnesMask : PatLeaf<(imm), [{
   if (!N->hasOneUse())
     return false;
   return !isInt<12>(N->getSExtValue()) && isMask_64(N->getZExtValue());
-}], XLenSubTrailingOnes>;
+}], XLenSubTrailingOnes> {
+  let GISelPredicateCode = [{
+    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
+      return false;
+    const auto &MO = MI.getOperand(1);
+    if (!MO.isCImm())
+      return false;
+    return !isInt<12>(MO.getCImm()->getSExtValue()) &&
+           isMask_64(MO.getCImm()->getZExtValue());
+  }];
+}
 
 // Similar to LeadingOnesMask, but only consider leading ones in the lower 32
 // bits.
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
index 828c6053c8ff72..534fec21ce7c47 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
@@ -161,8 +161,8 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 ; RV32I-LABEL: fsgnj_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a4, a2, -1
-; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    and a2, a3, a2
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
@@ -170,10 +170,10 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 ; RV64I-LABEL: fsgnj_d:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a3, a2, 63
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
@@ -241,9 +241,9 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
 ; RV32I-LABEL: fsgnjn_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
+; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    xor a3, a3, a2
-; RV32I-NEXT:    addi a4, a2, -1
-; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    and a2, a3, a2
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
@@ -251,11 +251,11 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
 ; RV64I-LABEL: fsgnjn_d:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a3, a2, 63
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    xor a1, a1, a3
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = fneg double %b
@@ -281,9 +281,8 @@ define double @fabs_d(double %a, double %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a3, a1
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a1, a3, a1
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -296,9 +295,8 @@ define double @fabs_d(double %a, double %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    li a0, -1
+; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
index 6b623fd6867a2d..81d3381449bc87 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
@@ -787,17 +787,15 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_s_i16:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
-; RV32IFD-NEXT:    lui a1, 16
-; RV32IFD-NEXT:    addi a1, a1, -1
-; RV32IFD-NEXT:    and a0, a0, a1
+; RV32IFD-NEXT:    slli a0, a0, 16
+; RV32IFD-NEXT:    srli a0, a0, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_s_i16:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
-; RV64IFD-NEXT:    lui a1, 16
-; RV64IFD-NEXT:    addiw a1, a1, -1
-; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    slli a0, a0, 48
+; RV64IFD-NEXT:    srli a0, a0, 48
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_i16:
@@ -805,9 +803,8 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __fixunsdfsi
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -817,9 +814,8 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __fixunsdfsi
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
index 04bfbbb6e694f4..8d77d41ab6b455 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll
@@ -631,16 +631,14 @@ define double @fabs_f64(double %a) nounwind {
 ;
 ; RV32I-LABEL: fabs_f64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs_f64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    srli a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.fabs.f64(double %a)
   ret double %1
@@ -715,8 +713,8 @@ define double @copysign_f64(double %a, double %b) nounwind {
 ; RV32I-LABEL: copysign_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a4, a2, -1
-; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    and a2, a3, a2
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
@@ -724,10 +722,10 @@ define double @copysign_f64(double %a, double %b) nounwind {
 ; RV64I-LABEL: copysign_f64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a3, a2, 63
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
@@ -1039,10 +1037,9 @@ define i1 @isnan_d_fpclass(double %x) {
 ;
 ; RV32I-LABEL: isnan_d_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a3, a2, -1
 ; RV32I-NEXT:    lui a2, 524032
-; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    beq a1, a2, .LBB25_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a0, a2, a1
@@ -1053,12 +1050,11 @@ define i1 @isnan_d_fpclass(double %x) {
 ;
 ; RV64I-LABEL: isnan_d_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    li a2, 2047
-; RV64I-NEXT:    srli a1, a1, 1
-; RV64I-NEXT:    slli a2, a2, 52
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    sltu a0, a2, a0
+; RV64I-NEXT:    li a1, 2047
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    slli a1, a1, 52
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3)  ; nan
   ret i1 %1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
index f5272989161509..3a60856665742a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
@@ -161,8 +161,8 @@ define float @fsgnj_s(float %a, float %b) nounwind {
 ; RV32I-LABEL: fsgnj_s:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a3, a2, -1
-; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -170,8 +170,8 @@ define float @fsgnj_s(float %a, float %b) nounwind {
 ; RV64I-LABEL: fsgnj_s:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    addiw a3, a2, -1
-; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -238,11 +238,11 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    slli s0, s0, 1
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    addi a2, a1, -1
-; RV32I-NEXT:    and a2, s0, a2
+; RV32I-NEXT:    srli s0, s0, 1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a0, s0, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -256,11 +256,11 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    lui a1, 524288
+; RV64I-NEXT:    slli s0, s0, 33
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    addiw a2, a1, -1
-; RV64I-NEXT:    and a2, s0, a2
+; RV64I-NEXT:    srli s0, s0, 33
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    or a0, s0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -287,9 +287,8 @@ define float @fabs_s(float %a, float %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    call __addsf3
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -301,9 +300,8 @@ define float @fabs_s(float %a, float %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    lui a0, 524288
-; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    call __addsf3
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
index c310ee8e316717..51df36f5eee05d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
@@ -722,17 +722,15 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
 ; RV32IF-LABEL: fcvt_wu_s_i16:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
-; RV32IF-NEXT:    lui a1, 16
-; RV32IF-NEXT:    addi a1, a1, -1
-; RV32IF-NEXT:    and a0, a0, a1
+; RV32IF-NEXT:    slli a0, a0, 16
+; RV32IF-NEXT:    srli a0, a0, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_i16:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
-; RV64IF-NEXT:    lui a1, 16
-; RV64IF-NEXT:    addiw a1, a1, -1
-; RV64IF-NEXT:    and a0, a0, a1
+; RV64IF-NEXT:    slli a0, a0, 48
+; RV64IF-NEXT:    srli a0, a0, 48
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_i16:
@@ -740,9 +738,8 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __fixunssfsi
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -752,9 +749,8 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __fixunssfsi
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
index 210bcc7d4c10f0..35c7fdfb33fe44 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
@@ -602,16 +602,14 @@ define float @fabs_f32(float %a) nounwind {
 ;
 ; RV32I-LABEL: fabs_f32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs_f32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.fabs.f32(float %a)
   ret float %1
@@ -695,8 +693,8 @@ define float @copysign_f32(float %a, float %b) nounwind {
 ; RV32I-LABEL: copysign_f32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a3, a2, -1
-; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -704,8 +702,8 @@ define float @copysign_f32(float %a, float %b) nounwind {
 ; RV64I-LABEL: copysign_f32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    addiw a3, a2, -1
-; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -970,63 +968,61 @@ define i1 @fpclass(float %x) {
 ;
 ; RV32I-LABEL: fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    lui a2, 522240
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slli a2, a0, 1
 ; RV32I-NEXT:    lui a3, 2048
 ; RV32I-NEXT:    lui a4, 1046528
-; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    srli a2, a2, 1
 ; RV32I-NEXT:    addi a3, a3, -1
-; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    addi a5, a1, -1
+; RV32I-NEXT:    addi a5, a2, -1
 ; RV32I-NEXT:    sltu a3, a5, a3
 ; RV32I-NEXT:    lui a5, 520192
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    add a4, a1, a4
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    add a4, a2, a4
 ; RV32I-NEXT:    sltu a4, a4, a5
-; RV32I-NEXT:    xor a5, a1, a2
-; RV32I-NEXT:    sltu a2, a2, a1
-; RV32I-NEXT:    seqz a1, a1
+; RV32I-NEXT:    xor a5, a2, a1
+; RV32I-NEXT:    sltu a1, a1, a2
+; RV32I-NEXT:    seqz a2, a2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    seqz a5, a5
 ; RV32I-NEXT:    and a3, a3, a0
-; RV32I-NEXT:    or a1, a1, a5
+; RV32I-NEXT:    or a2, a2, a5
 ; RV32I-NEXT:    and a0, a4, a0
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    or a2, a2, a3
 ; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    slli a3, a0, 32
-; RV64I-NEXT:    li a4, 1
-; RV64I-NEXT:    lui a5, 2048
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    seqz a1, a0
-; RV64I-NEXT:    xor a6, a0, a2
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slli a2, a0, 33
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    li a3, 1
+; RV64I-NEXT:    lui a4, 2048
+; RV64I-NEXT:    srli a2, a2, 33
+; RV64I-NEXT:    seqz a5, a2
+; RV64I-NEXT:    xor a6, a2, a1
 ; RV64I-NEXT:    seqz a6, a6
-; RV64I-NEXT:    or a1, a1, a6
+; RV64I-NEXT:    or a5, a5, a6
 ; RV64I-NEXT:    lui a6, 520192
-; RV64I-NEXT:    srli a3, a3, 32
-; RV64I-NEXT:    xor a3, a3, a0
-; RV64I-NEXT:    sub a4, a0, a4
-; RV64I-NEXT:    sltu a2, a2, a0
-; RV64I-NEXT:    sub a0, a0, a5
-; RV64I-NEXT:    addiw a5, a5, -1
-; RV64I-NEXT:    snez a3, a3
-; RV64I-NEXT:    slli a4, a4, 32
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a4, a4, 32
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    sltu a4, a4, a5
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    sltu a0, a0, a6
-; RV64I-NEXT:    and a4, a4, a3
-; RV64I-NEXT:    or a1, a1, a4
-; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    sub a3, a2, a3
+; RV64I-NEXT:    sltu a1, a1, a2
+; RV64I-NEXT:    sub a2, a2, a4
+; RV64I-NEXT:    addiw a4, a4, -1
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    slli a3, a3, 32
+; RV64I-NEXT:    slli a2, a2, 32
+; RV64I-NEXT:    srli a3, a3, 32
+; RV64I-NEXT:    srli a2, a2, 32
+; RV64I-NEXT:    sltu a3, a3, a4
+; RV64I-NEXT:    or a1, a5, a1
+; RV64I-NEXT:    sltu a2, a2, a6
+; RV64I-NEXT:    and a3, a3, a0
+; RV64I-NEXT:    or a1, a1, a3
+; RV64I-NEXT:    and a0, a2, a0
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
   %cmp = call i1 @llvm.is.fpclass.f32(float %x, i32 639)
@@ -1050,20 +1046,18 @@ define i1 @isnan_fpclass(float %x) {
 ;
 ; RV32I-LABEL: isnan_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    lui a2, 522240
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: isnan_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    sltu a0, a2, a0
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 3)  ; nan
   ret i1 %1
@@ -1086,9 +1080,8 @@ define i1 @isqnan_fpclass(float %x) {
 ;
 ; RV32I-LABEL: isqnan_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    lui a1, 523264
 ; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    xori a0, a0, 1
@@ -1096,9 +1089,8 @@ define i1 @isqnan_fpclass(float %x) {
 ;
 ; RV64I-LABEL: isqnan_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    lui a1, 523264
 ; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    xori a0, a0, 1
@@ -1124,26 +1116,24 @@ define i1 @issnan_fpclass(float %x) {
 ;
 ; RV32I-LABEL: issnan_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    lui a2, 522240
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 523264
-; RV32I-NEXT:    sltu a2, a2, a0
-; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    lui a2, 523264
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    sltu a1, a1, a0
+; RV32I-NEXT:    sltu a0, a0, a2
+; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: issnan_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 523264
-; RV64I-NEXT:    sltu a2, a2, a0
-; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    and a0, a2, a0
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    lui a2, 523264
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    sltu a1, a1, a0
+; RV64I-NEXT:    sltu a0, a0, a2
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 1)  ; snan
   ret i1 %1
@@ -1166,21 +1156,19 @@ define i1 @isinf_fpclass(float %x) {
 ;
 ; RV32I-LABEL: isinf_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    lui a2, 522240
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: isinf_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 516)  ; 0x204 = "inf"
@@ -1273,20 +1261,18 @@ define i1 @isfinite_fpclass(float %x) {
 ;
 ; RV32I-LABEL: isfinite_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    lui a2, 522240
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    sltu a0, a0, a2
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: isfinite_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    sltu a0, a0, a2
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; 0x1f8 = "finite"
   ret i1 %1
@@ -1341,27 +1327,25 @@ define i1 @isnegfinite_fpclass(float %x) {
 ;
 ; RV32I-LABEL: isnegfinite_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    lui a2, 522240
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    srli a2, a2, 1
+; RV32I-NEXT:    xor a0, a0, a2
 ; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    sltu a1, a1, a2
+; RV32I-NEXT:    sltu a1, a2, a1
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: isnegfinite_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slli a2, a0, 33
 ; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a2, a2, 33
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    xor a0, a0, a2
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    sltu a1, a1, a2
+; RV64I-NEXT:    sltu a1, a2, a1
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 56)  ; 0x38 = "-finite"
@@ -1385,26 +1369,24 @@ define i1 @isnotfinite_fpclass(float %x) {
 ;
 ; RV32I-LABEL: isnotfinite_fpclass:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    lui a2, 522240
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    xor a1, a0, a2
-; RV32I-NEXT:    seqz a1, a1
-; RV32I-NEXT:    sltu a0, a2, a0
-; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    xor a2, a0, a1
+; RV32I-NEXT:    seqz a2, a2
+; RV32I-NEXT:    sltu a0, a1, a0
+; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: isnotfinite_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    xor a1, a0, a2
-; RV64I-NEXT:    seqz a1, a1
-; RV64I-NEXT:    sltu a0, a2, a0
-; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    xor a2, a0, a1
+; RV64I-NEXT:    seqz a2, a2
+; RV64I-NEXT:    sltu a0, a1, a0
+; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 519)  ; ox207 = "inf|nan"
   ret i1 %1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
index 9f5c013c9ccd4a..2fc25fbb39bb9b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
@@ -98,9 +98,8 @@ define fp128 @fneg(fp128 %x) {
 define fp128 @fabs(fp128 %x) {
 ; CHECK-LABEL: fabs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, -1
-; CHECK-NEXT:    srli a2, a2, 1
-; CHECK-NEXT:    and a1, a1, a2
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    srli a1, a1, 1
 ; CHECK-NEXT:    ret
   %a = call fp128 @llvm.fabs.f128(fp128 %x)
   ret fp128 %a
@@ -110,11 +109,11 @@ define fp128 @fcopysign(fp128 %x, fp128 %y) {
 ; CHECK-LABEL: fcopysign:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, -1
-; CHECK-NEXT:    slli a4, a2, 63
-; CHECK-NEXT:    srli a2, a2, 1
-; CHECK-NEXT:    and a1, a1, a2
-; CHECK-NEXT:    and a3, a3, a4
-; CHECK-NEXT:    or a1, a1, a3
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    slli a2, a2, 63
+; CHECK-NEXT:    srli a1, a1, 1
+; CHECK-NEXT:    and a2, a3, a2
+; CHECK-NEXT:    or a1, a1, a2
 ; CHECK-NEXT:    ret
   %a = call fp128 @llvm.copysign.f128(fp128 %x, fp128 %y)
   ret fp128 %a
@@ -486,9 +485,8 @@ define fp128 @uitofp_i16(i16 %x) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    lui a1, 16
-; CHECK-NEXT:    addiw a1, a1, -1
-; CHECK-NEXT:    and a0, a0, a1
+; CHECK-NEXT:    slli a0, a0, 48
+; CHECK-NEXT:    srli a0, a0, 48
 ; CHECK-NEXT:    call __floatunsitf
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
index 39d0ee7c382dfc..e21be2afebd205 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
@@ -102,10 +102,9 @@ body:             |
     ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
     ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
     ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
-    ; RV32I-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 16
-    ; RV32I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], -1
-    ; RV32I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[ADD]], [[ADDI]]
-    ; RV32I-NEXT: $x10 = COPY [[AND]]
+    ; RV32I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADD]], 16
+    ; RV32I-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[SLLI]], 16
+    ; RV32I-NEXT: $x10 = COPY [[SRLI]]
     ; RV32I-NEXT: PseudoRET implicit $x10
     %0:gprb(s32) = COPY $x10
     %1:gprb(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
index 68bf9240ccd1df..ababec16f7f8fa 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
@@ -361,9 +361,8 @@ define i8 @srai_i8(i8 %a) nounwind {
 define i16 @srli_i16(i16 %a) nounwind {
 ; RV32I-LABEL: srli_i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 6
 ; RV32I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
index 7f22127ad3536c..338925059862cd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
@@ -885,9 +885,8 @@ define i64 @abs_i64(i64 %x) {
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV32I-LABEL: zexth_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: zexth_i32:
@@ -901,9 +900,8 @@ define i32 @zexth_i32(i32 %a) nounwind {
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV32I-LABEL: zexth_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
index a647eae82dddfc..b214cf68ddce86 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
@@ -7,9 +7,8 @@
 define i32 @pack_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: pack_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 16
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
@@ -257,9 +256,8 @@ define void @packh_i16_3(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) {
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV32I-LABEL: zexth_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBKB-LABEL: zexth_i32:
@@ -273,9 +271,8 @@ define i32 @zexth_i32(i32 %a) nounwind {
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV32I-LABEL: zexth_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
@@ -291,9 +288,8 @@ define i64 @zexth_i64(i64 %a) nounwind {
 define i32 @zext_i16_to_i32(i16 %a) nounwind {
 ; RV32I-LABEL: zext_i16_to_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBKB-LABEL: zext_i16_to_i32:
@@ -307,9 +303,8 @@ define i32 @zext_i16_to_i32(i16 %a) nounwind {
 define i64 @zext_i16_to_i64(i16 %a) nounwind {
 ; RV32I-LABEL: zext_i16_to_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
index 9584270d8e66f5..993ba19caa6b4b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zba.ll
@@ -1520,19 +1520,17 @@ entry:
 define i64 @srli_slli_i16(i64 %1) {
 ; RV64I-LABEL: srli_slli_i16:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    slli a0, a0, 4
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBANOZBB-LABEL: srli_slli_i16:
 ; RV64ZBANOZBB:       # %bb.0: # %entry
-; RV64ZBANOZBB-NEXT:    lui a1, 16
-; RV64ZBANOZBB-NEXT:    addiw a1, a1, -1
 ; RV64ZBANOZBB-NEXT:    srli a0, a0, 2
-; RV64ZBANOZBB-NEXT:    and a0, a0, a1
+; RV64ZBANOZBB-NEXT:    slli a0, a0, 48
+; RV64ZBANOZBB-NEXT:    srli a0, a0, 48
 ; RV64ZBANOZBB-NEXT:    slli a0, a0, 4
 ; RV64ZBANOZBB-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
index 51a84f5e93e31c..a29219bfde06bb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
@@ -439,9 +439,8 @@ define i8 @srai_i8(i8 %a) nounwind {
 define i16 @srli_i16(i16 %a) nounwind {
 ; RV64I-LABEL: srli_i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 6
 ; RV64I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
index 25700b165e8ac8..4e12644a22b428 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
@@ -1146,9 +1146,8 @@ define i64 @abs_i64(i64 %x) {
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV64I-LABEL: zexth_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: zexth_i32:
@@ -1162,9 +1161,8 @@ define i32 @zexth_i32(i32 %a) nounwind {
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV64I-LABEL: zexth_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: zexth_i64:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
index 09ee5caa12c1c0..bf430c618afca2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
@@ -8,12 +8,10 @@
 define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: pack_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addi a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a1, a1, 16
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    slliw a1, a1, 16
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBKB-LABEL: pack_i32:
@@ -334,9 +332,8 @@ define signext i32 @pack_i32_allWUsers(i16 zeroext %0, i16 zeroext %1, i16 zeroe
 ; RV64I-LABEL: pack_i32_allWUsers:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    sext.w a0, a0
@@ -382,9 +379,8 @@ define i64 @pack_i64_imm() {
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV64I-LABEL: zexth_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBKB-LABEL: zexth_i32:
@@ -398,9 +394,8 @@ define i32 @zexth_i32(i32 %a) nounwind {
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV64I-LABEL: zexth_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBKB-LABEL: zexth_i64:
@@ -414,9 +409,8 @@ define i64 @zexth_i64(i64 %a) nounwind {
 define i32 @zext_i16_to_i32(i16 %a) nounwind {
 ; RV64I-LABEL: zext_i16_to_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBKB-LABEL: zext_i16_to_i32:
@@ -430,9 +424,8 @@ define i32 @zext_i16_to_i32(i16 %a) nounwind {
 define i64 @zext_i16_to_i64(i16 %a) nounwind {
 ; RV64I-LABEL: zext_i16_to_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBKB-LABEL: zext_i16_to_i64:


