[llvm] [GISel][RISCV] Use isSExtCheaperThanZExt when widening G_ICMP. (PR #120032)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 15 18:58:19 PST 2024


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/120032

Sign extending i32->i64 is more efficient than zero extending for RV64.
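
For illustration (assuming RV64 without the Zba extension, which would provide a
single-instruction zext.w): zero extending a 32-bit value held in a GPR takes two
shifts, while sign extending takes a single instruction:

    # zero extend i32 -> i64: two instructions
    slli a0, a0, 32
    srli a0, a0, 32

    # sign extend i32 -> i64: one instruction
    # (sext.w is an alias for addiw a0, a0, 0)
    sext.w a0, a0

Sign extending both operands preserves the result of equality and unsigned
comparisons just as zero extending does, so the widened G_ICMP can use the
cheaper extension.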

From 29df9b266a1cd78505716bbe18df4526c94fc9a5 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 15 Dec 2024 17:59:48 -0800
Subject: [PATCH] [GISel][RISCV] Use isSExtCheaperThanZExt when widening
 G_ICMP.

Sign extending i32->i64 is more efficient than zero extending for RV64.
---
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |  15 ++-
 .../CodeGen/RISCV/GlobalISel/double-arith.ll  |   3 +-
 .../RISCV/GlobalISel/double-convert.ll        |  30 ++---
 .../CodeGen/RISCV/GlobalISel/double-fcmp.ll   |  36 +++---
 .../CodeGen/RISCV/GlobalISel/float-arith.ll   |   3 +-
 .../CodeGen/RISCV/GlobalISel/float-convert.ll |  30 ++---
 .../CodeGen/RISCV/GlobalISel/float-fcmp.ll    |  24 ++--
 .../RISCV/GlobalISel/float-intrinsics.ll      |  46 +++----
 llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll   |   3 +-
 .../legalizer/legalize-addo-subo-rv64.mir     |  14 +--
 .../legalizer/legalize-icmp-rv64.mir          |  42 +++----
 .../legalizer/legalize-sat-rv64.mir           |  22 ++--
 .../legalizer/legalize-umax-rv64.mir          |   7 +-
 .../legalizer/legalize-umin-rv64.mir          |   7 +-
 llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll | 113 +++++++-----------
 llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll    |  40 +++----
 16 files changed, 174 insertions(+), 261 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 2fc8ef6a52a528..6dc4c2d54196e4 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3077,10 +3077,17 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
     if (TypeIdx == 0)
       widenScalarDst(MI, WideTy);
     else {
-      unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>(
-                               MI.getOperand(1).getPredicate()))
-                               ? TargetOpcode::G_SEXT
-                               : TargetOpcode::G_ZEXT;
+      LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
+      CmpInst::Predicate Pred =
+          static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
+
+      auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
+      unsigned ExtOpcode =
+          (CmpInst::isSigned(Pred) ||
+           TLI.isSExtCheaperThanZExt(getApproximateEVTForLLT(SrcTy, Ctx),
+                                     getApproximateEVTForLLT(WideTy, Ctx)))
+              ? TargetOpcode::G_SEXT
+              : TargetOpcode::G_ZEXT;
       widenScalarSrc(MI, WideTy, 2, ExtOpcode);
       widenScalarSrc(MI, WideTy, 3, ExtOpcode);
     }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
index 66eb4372aefadb..cb2037f5fb0271 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
@@ -215,8 +215,7 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    call __eqdf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
index 81d3381449bc87..aeed219d99555d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
@@ -117,25 +117,14 @@ define i32 @fcvt_wu_d(double %a) nounwind {
 }
 
 define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
-; RV32IFD-LABEL: fcvt_wu_d_multiple_use:
-; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
-; RV32IFD-NEXT:    bnez a0, .LBB4_2
-; RV32IFD-NEXT:  # %bb.1:
-; RV32IFD-NEXT:    li a0, 1
-; RV32IFD-NEXT:  .LBB4_2:
-; RV32IFD-NEXT:    ret
-;
-; RV64IFD-LABEL: fcvt_wu_d_multiple_use:
-; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
-; RV64IFD-NEXT:    slli a1, a0, 32
-; RV64IFD-NEXT:    srli a1, a1, 32
-; RV64IFD-NEXT:    bnez a1, .LBB4_2
-; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    li a0, 1
-; RV64IFD-NEXT:  .LBB4_2:
-; RV64IFD-NEXT:    ret
+; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
+; CHECKIFD-NEXT:    bnez a0, .LBB4_2
+; CHECKIFD-NEXT:  # %bb.1:
+; CHECKIFD-NEXT:    li a0, 1
+; CHECKIFD-NEXT:  .LBB4_2:
+; CHECKIFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_d_multiple_use:
 ; RV32I:       # %bb.0:
@@ -155,8 +144,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __fixunsdfsi
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    bnez a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll
index 7ef1af22370a1a..dfa76a2e1531be 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll
@@ -28,7 +28,7 @@ define i32 @fcmp_false(double %a, double %b) nounwind {
   ret i32 %2
 }
 
-; FIXME: slli+srli on RV64 are unnecessary 
+; FIXME: slli+srli on RV64 are unnecessary
 define i32 @fcmp_oeq(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fcmp_oeq:
 ; CHECKIFD:       # %bb.0:
@@ -50,8 +50,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __eqdf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -194,7 +193,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
   ret i32 %2
 }
 
-; FIXME: slli+srli on RV64 are unnecessary 
+; FIXME: slli+srli on RV64 are unnecessary
 define i32 @fcmp_one(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fcmp_one:
 ; CHECKIFD:       # %bb.0:
@@ -244,14 +243,12 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    call __eqdf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    call __unorddf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, s2, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -265,7 +262,7 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
   ret i32 %2
 }
 
-; FIXME: slli+srli on RV64 are unnecessary 
+; FIXME: slli+srli on RV64 are unnecessary
 define i32 @fcmp_ord(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fcmp_ord:
 ; CHECKIFD:       # %bb.0:
@@ -289,8 +286,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __unorddf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -300,7 +296,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
   ret i32 %2
 }
 
-; FIXME: slli+srli on RV64 are unnecessary 
+; FIXME: slli+srli on RV64 are unnecessary
 define i32 @fcmp_ueq(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fcmp_ueq:
 ; CHECKIFD:       # %bb.0:
@@ -351,14 +347,12 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    call __eqdf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    call __unorddf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, s2, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -508,7 +502,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
   ret i32 %2
 }
 
-; FIXME: slli+srli on RV64 are unnecessary 
+; FIXME: slli+srli on RV64 are unnecessary
 define i32 @fcmp_une(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fcmp_une:
 ; CHECKIFD:       # %bb.0:
@@ -531,8 +525,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __nedf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -542,7 +535,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
   ret i32 %2
 }
 
-; FIXME: slli+srli on RV64 are unnecessary 
+; FIXME: slli+srli on RV64 are unnecessary
 define i32 @fcmp_uno(double %a, double %b) nounwind {
 ; CHECKIFD-LABEL: fcmp_uno:
 ; CHECKIFD:       # %bb.0:
@@ -567,8 +560,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __unorddf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
index 3a60856665742a..fdeda0c273f6d0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll
@@ -210,8 +210,7 @@ define i32 @fneg_s(float %a, float %b) nounwind {
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    call __eqsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
index 51df36f5eee05d..1820ecf3b5056c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
@@ -65,25 +65,14 @@ define i32 @fcvt_wu_s(float %a) nounwind {
 ; Test where the fptoui has multiple uses, one of which causes a sext to be
 ; inserted on RV64.
 define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
-; RV32IF-LABEL: fcvt_wu_s_multiple_use:
-; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
-; RV32IF-NEXT:    bnez a0, .LBB2_2
-; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    li a0, 1
-; RV32IF-NEXT:  .LBB2_2:
-; RV32IF-NEXT:    ret
-;
-; RV64IF-LABEL: fcvt_wu_s_multiple_use:
-; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
-; RV64IF-NEXT:    slli a1, a0, 32
-; RV64IF-NEXT:    srli a1, a1, 32
-; RV64IF-NEXT:    bnez a1, .LBB2_2
-; RV64IF-NEXT:  # %bb.1:
-; RV64IF-NEXT:    li a0, 1
-; RV64IF-NEXT:  .LBB2_2:
-; RV64IF-NEXT:    ret
+; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
+; CHECKIF:       # %bb.0:
+; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
+; CHECKIF-NEXT:    bnez a0, .LBB2_2
+; CHECKIF-NEXT:  # %bb.1:
+; CHECKIF-NEXT:    li a0, 1
+; CHECKIF-NEXT:  .LBB2_2:
+; CHECKIF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcvt_wu_s_multiple_use:
 ; RV32I:       # %bb.0:
@@ -103,8 +92,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __fixunssfsi
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    bnez a1, .LBB2_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll
index bdd779d4761099..475b67bda9ae93 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll
@@ -50,8 +50,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __eqsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -236,14 +235,12 @@ define i32 @fcmp_one(float %a, float %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    call __eqsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    call __unordsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    and a0, s2, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -281,8 +278,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __unordsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -335,14 +331,12 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
 ; RV64I-NEXT:    call __eqsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    call __unordsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    or a0, s2, a0
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -516,8 +510,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __nesf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -552,8 +545,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __unordsf2
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
index 35c7fdfb33fe44..05730a710b4d8e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll
@@ -997,32 +997,27 @@ define i1 @fpclass(float %x) {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a1, 522240
 ; RV64I-NEXT:    slli a2, a0, 33
-; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    li a3, 1
 ; RV64I-NEXT:    lui a4, 2048
+; RV64I-NEXT:    lui a5, 520192
 ; RV64I-NEXT:    srli a2, a2, 33
-; RV64I-NEXT:    seqz a5, a2
-; RV64I-NEXT:    xor a6, a2, a1
-; RV64I-NEXT:    seqz a6, a6
-; RV64I-NEXT:    or a5, a5, a6
-; RV64I-NEXT:    lui a6, 520192
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    addiw a6, a4, -1
 ; RV64I-NEXT:    xor a0, a0, a2
-; RV64I-NEXT:    sub a3, a2, a3
+; RV64I-NEXT:    subw a3, a2, a3
+; RV64I-NEXT:    sltu a3, a3, a6
+; RV64I-NEXT:    xor a6, a2, a1
 ; RV64I-NEXT:    sltu a1, a1, a2
-; RV64I-NEXT:    sub a2, a2, a4
-; RV64I-NEXT:    addiw a4, a4, -1
+; RV64I-NEXT:    subw a4, a2, a4
+; RV64I-NEXT:    seqz a2, a2
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    slli a3, a3, 32
-; RV64I-NEXT:    slli a2, a2, 32
-; RV64I-NEXT:    srli a3, a3, 32
-; RV64I-NEXT:    srli a2, a2, 32
-; RV64I-NEXT:    sltu a3, a3, a4
-; RV64I-NEXT:    or a1, a5, a1
-; RV64I-NEXT:    sltu a2, a2, a6
+; RV64I-NEXT:    seqz a6, a6
+; RV64I-NEXT:    sltu a4, a4, a5
 ; RV64I-NEXT:    and a3, a3, a0
+; RV64I-NEXT:    or a2, a2, a6
+; RV64I-NEXT:    or a1, a2, a1
 ; RV64I-NEXT:    or a1, a1, a3
-; RV64I-NEXT:    and a0, a2, a0
+; RV64I-NEXT:    and a0, a4, a0
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
   %cmp = call i1 @llvm.is.fpclass.f32(float %x, i32 639)
@@ -1200,8 +1195,7 @@ define i1 @isposinf_fpclass(float %x) {
 ; RV64I-LABEL: isposinf_fpclass:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a1, 522240
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
@@ -1233,10 +1227,8 @@ define i1 @isneginf_fpclass(float %x) {
 ;
 ; RV64I-LABEL: isneginf_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    li a1, 511
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a1, a1, 23
+; RV64I-NEXT:    lui a1, 1046528
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
@@ -1302,8 +1294,7 @@ define i1 @isposfinite_fpclass(float %x) {
 ; RV64I-LABEL: isposfinite_fpclass:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a1, 522240
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 448)  ; 0x1c0 = "+finite"
@@ -1340,9 +1331,8 @@ define i1 @isnegfinite_fpclass(float %x) {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a1, 522240
 ; RV64I-NEXT:    slli a2, a0, 33
-; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    srli a2, a2, 33
-; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    xor a0, a0, a2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    sltu a1, a2, a1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
index eb48c90e14f803..978a6b0dc024c1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
@@ -124,8 +124,7 @@ define i1 @fcmp(fp128 %x, fp128 %y) nounwind {
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    call __eqtf2
-; CHECK-NEXT:    slli a0, a0, 32
-; CHECK-NEXT:    srli a0, a0, 32
+; CHECK-NEXT:    sext.w a0, a0
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
index b815c37401716e..f2ec70933261eb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
@@ -336,10 +336,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
     ; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
@@ -455,10 +454,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[SUB]](s64)
     ; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv64.mir
index 74d745732d59d7..d7b9a4a0f42680 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-icmp-rv64.mir
@@ -805,10 +805,9 @@ body:             |
     ; CHECK-LABEL: name: cmp_ugt_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ugt), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ugt), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s64) = COPY $x10
@@ -983,10 +982,9 @@ body:             |
     ; CHECK-LABEL: name: cmp_ult_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s64) = COPY $x10
@@ -1161,10 +1159,9 @@ body:             |
     ; CHECK-LABEL: name: cmp_uge_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(uge), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(uge), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s64) = COPY $x10
@@ -1339,10 +1336,9 @@ body:             |
     ; CHECK-LABEL: name: cmp_ule_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ule), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ule), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s64) = COPY $x10
@@ -1517,10 +1513,9 @@ body:             |
     ; CHECK-LABEL: name: cmp_eq_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s64) = COPY $x10
@@ -1691,10 +1686,9 @@ body:             |
     ; CHECK-LABEL: name: cmp_ne_i32
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: $x10 = COPY [[ICMP]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
index 54a3cb63545d58..bf8c8d690f0761 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
@@ -17,13 +17,12 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ADD]](s64)
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C1]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
     ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[COPY2]]
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
@@ -175,12 +174,11 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SUB]](s64)
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C1]](s64)
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
     ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[TRUNC]]
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umax-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umax-rv64.mir
index 17f99ecc1e98a2..0a2331debd347d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umax-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umax-rv64.mir
@@ -88,10 +88,9 @@ body:             |
     ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ugt), [[AND]](s64), [[AND1]]
+    ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ugt), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; RV64I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC]], [[TRUNC1]]
     ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[SELECT]](s32)
     ; RV64I-NEXT: $x10 = COPY [[ZEXT]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umin-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umin-rv64.mir
index 33fac8ec13d64f..78673a1dcdda63 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umin-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-umin-rv64.mir
@@ -88,10 +88,9 @@ body:             |
     ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
+    ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+    ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+    ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
     ; RV64I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC]], [[TRUNC1]]
     ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[SELECT]](s32)
     ; RV64I-NEXT: $x10 = COPY [[ZEXT]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
index 4e12644a22b428..7a0d5f3bf8cf74 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
@@ -10,8 +10,7 @@ declare i32 @llvm.ctlz.i32(i32, i1)
 define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ctlz_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    beqz a1, .LBB0_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
@@ -69,8 +68,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    li s0, 31
 ; RV64I-NEXT:    beqz a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
@@ -136,8 +134,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    li s0, 32
 ; RV64I-NEXT:    addi a0, a0, -1
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a2, a1, 32
+; RV64I-NEXT:    sext.w a2, a0
 ; RV64I-NEXT:    li a1, 32
 ; RV64I-NEXT:    beqz a2, .LBB2_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
@@ -199,36 +196,35 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srliw a2, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    addiw a2, a3, 1365
-; RV64I-NEXT:    srliw a3, a0, 2
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    srliw a3, a0, 4
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    srliw a3, a0, 8
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    srliw a3, a0, 16
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    srliw a3, a0, 1
-; RV64I-NEXT:    and a2, a3, a2
-; RV64I-NEXT:    lui a3, 209715
-; RV64I-NEXT:    addiw a3, a3, 819
-; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    srliw a0, a0, 1
+; RV64I-NEXT:    lui a1, 349525
+; RV64I-NEXT:    or a0, s0, a0
+; RV64I-NEXT:    addiw a1, a1, 1365
 ; RV64I-NEXT:    srliw a2, a0, 2
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    lui a3, 61681
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    lui a2, 4112
-; RV64I-NEXT:    srli s0, a1, 32
-; RV64I-NEXT:    addiw a1, a3, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    addiw a1, a2, 257
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    srliw a2, a0, 4
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    srliw a2, a0, 8
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    srliw a2, a0, 16
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    srliw a2, a0, 1
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    lui a2, 209715
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    srliw a1, a0, 2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    lui a2, 61681
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    lui a1, 4112
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    call __muldi3
 ; RV64I-NEXT:    beqz s0, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
@@ -247,9 +243,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ;
 ; RV64ZBB-LABEL: findLastSet_i32:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a1, a0, 32
-; RV64ZBB-NEXT:    srli a1, a1, 32
-; RV64ZBB-NEXT:    beqz a1, .LBB3_2
+; RV64ZBB-NEXT:    beqz a0, .LBB3_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    clzw a0, a0
 ; RV64ZBB-NEXT:    xori a0, a0, 31
@@ -405,8 +399,7 @@ declare i32 @llvm.cttz.i32(i32, i1)
 define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: cttz_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    beqz a1, .LBB6_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
@@ -518,8 +511,6 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    slli s0, s0, 32
-; RV64I-NEXT:    srli s0, s0, 32
 ; RV64I-NEXT:    beqz s0, .LBB8_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srliw a0, a0, 24
@@ -534,9 +525,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ;
 ; RV64ZBB-LABEL: findFirstSet_i32:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a1, a0, 32
-; RV64ZBB-NEXT:    srli a1, a1, 32
-; RV64ZBB-NEXT:    beqz a1, .LBB8_2
+; RV64ZBB-NEXT:    beqz a0, .LBB8_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    ctzw a0, a0
 ; RV64ZBB-NEXT:    ret
@@ -578,15 +567,13 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    slli s0, s0, 32
-; RV64I-NEXT:    srli s0, s0, 32
-; RV64I-NEXT:    li a1, 0
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    beqz s0, .LBB9_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    srliw a1, a0, 24
-; RV64I-NEXT:    addiw a1, a1, 1
+; RV64I-NEXT:    srliw a0, a1, 24
+; RV64I-NEXT:    addiw a0, a0, 1
 ; RV64I-NEXT:  .LBB9_2:
-; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -594,15 +581,13 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ;
 ; RV64ZBB-LABEL: ffs_i32:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a1, a0, 32
-; RV64ZBB-NEXT:    srli a2, a1, 32
-; RV64ZBB-NEXT:    li a1, 0
-; RV64ZBB-NEXT:    beqz a2, .LBB9_2
+; RV64ZBB-NEXT:    mv a1, a0
+; RV64ZBB-NEXT:    li a0, 0
+; RV64ZBB-NEXT:    beqz a1, .LBB9_2
 ; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    ctzw a1, a0
-; RV64ZBB-NEXT:    addiw a1, a1, 1
+; RV64ZBB-NEXT:    ctzw a0, a1
+; RV64ZBB-NEXT:    addiw a0, a0, 1
 ; RV64ZBB-NEXT:  .LBB9_2:
-; RV64ZBB-NEXT:    mv a0, a1
 ; RV64ZBB-NEXT:    ret
   %1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
   %2 = add i32 %1, 1
@@ -993,11 +978,7 @@ define i64 @max_i64(i64 %a, i64 %b) nounwind {
 define signext i32 @minu_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: minu_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a2, a0, 32
-; RV64I-NEXT:    slli a3, a1, 32
-; RV64I-NEXT:    srli a2, a2, 32
-; RV64I-NEXT:    srli a3, a3, 32
-; RV64I-NEXT:    bltu a2, a3, .LBB23_2
+; RV64I-NEXT:    bltu a0, a1, .LBB23_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB23_2:
@@ -1041,11 +1022,7 @@ define i64 @minu_i64(i64 %a, i64 %b) nounwind {
 define signext i32 @maxu_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: maxu_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a2, a0, 32
-; RV64I-NEXT:    slli a3, a1, 32
-; RV64I-NEXT:    srli a2, a2, 32
-; RV64I-NEXT:    srli a3, a3, 32
-; RV64I-NEXT:    bltu a3, a2, .LBB25_2
+; RV64I-NEXT:    bltu a1, a0, .LBB25_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB25_2:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll
index e2a95eb974342d..939a7e255b8aea 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll
@@ -93,18 +93,16 @@ define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: ucmp.8.32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    slli a2, a1, 32
-; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    srli a2, a2, 32
-; RV64I-NEXT:    bltu a2, a1, .LBB2_2
+; RV64I-NEXT:    sext.w a2, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    bltu a1, a2, .LBB2_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    li a0, 0
-; RV64I-NEXT:    bltu a1, a2, .LBB2_3
+; RV64I-NEXT:    bltu a2, a1, .LBB2_3
 ; RV64I-NEXT:    j .LBB2_4
 ; RV64I-NEXT:  .LBB2_2:
 ; RV64I-NEXT:    li a0, 1
-; RV64I-NEXT:    bgeu a1, a2, .LBB2_4
+; RV64I-NEXT:    bgeu a2, a1, .LBB2_4
 ; RV64I-NEXT:  .LBB2_3:
 ; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:  .LBB2_4:
@@ -179,18 +177,16 @@ define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind {
 ;
 ; RV64I-LABEL: ucmp.32.32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    slli a2, a1, 32
-; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    srli a2, a2, 32
-; RV64I-NEXT:    bltu a2, a1, .LBB4_2
+; RV64I-NEXT:    sext.w a2, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    bltu a1, a2, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    li a0, 0
-; RV64I-NEXT:    bltu a1, a2, .LBB4_3
+; RV64I-NEXT:    bltu a2, a1, .LBB4_3
 ; RV64I-NEXT:    j .LBB4_4
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    li a0, 1
-; RV64I-NEXT:    bgeu a1, a2, .LBB4_4
+; RV64I-NEXT:    bgeu a2, a1, .LBB4_4
 ; RV64I-NEXT:  .LBB4_3:
 ; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:  .LBB4_4:
@@ -218,18 +214,15 @@ define i32 @ucmp.32.32_sext(i32 signext %x, i32 signext %y) nounwind {
 ;
 ; RV64I-LABEL: ucmp.32.32_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    slli a2, a1, 32
-; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    srli a2, a2, 32
-; RV64I-NEXT:    bltu a2, a1, .LBB5_2
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    bltu a1, a0, .LBB5_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    li a0, 0
-; RV64I-NEXT:    bltu a1, a2, .LBB5_3
+; RV64I-NEXT:    bltu a2, a1, .LBB5_3
 ; RV64I-NEXT:    j .LBB5_4
 ; RV64I-NEXT:  .LBB5_2:
 ; RV64I-NEXT:    li a0, 1
-; RV64I-NEXT:    bgeu a1, a2, .LBB5_4
+; RV64I-NEXT:    bgeu a2, a1, .LBB5_4
 ; RV64I-NEXT:  .LBB5_3:
 ; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:  .LBB5_4:
@@ -257,8 +250,9 @@ define i32 @ucmp.32.32_zext(i32 zeroext %x, i32 zeroext %y) nounwind {
 ;
 ; RV64I-LABEL: ucmp.32.32_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mv a2, a0
-; RV64I-NEXT:    bltu a1, a0, .LBB6_2
+; RV64I-NEXT:    sext.w a2, a0
+; RV64I-NEXT:    sext.w a1, a1
+; RV64I-NEXT:    bltu a1, a2, .LBB6_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a2, a1, .LBB6_3


