[llvm] d660c0d - [RISCV] Optimize LI+SLT to SLTI+XORI for immediates in specific range

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 28 23:47:03 PDT 2022


Author: Liqin Weng
Date: 2022-03-29T14:46:49+08:00
New Revision: d660c0d7938f415188277ced580eab24fc8e7809

URL: https://github.com/llvm/llvm-project/commit/d660c0d7938f415188277ced580eab24fc8e7809
DIFF: https://github.com/llvm/llvm-project/commit/d660c0d7938f415188277ced580eab24fc8e7809.diff

LOG: [RISCV] Optimize LI+SLT to SLTI+XORI for immediates in specific range

This transform reduces register usage by one GPR, since the immediate is folded into SLTI/SLTIU instead of being materialized with a separate LI.

Reviewed By: craig.topper, benshi001

Differential Revision: https://reviews.llvm.org/D122051

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/double-fcmp.ll
    llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/float-fcmp.ll
    llvm/test/CodeGen/RISCV/i32-icmp.ll
    llvm/test/CodeGen/RISCV/select-constant-xor.ll
    llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 8d5064dc5af07..2e65185a0b1d7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -347,10 +347,19 @@ def ixlenimm_li : Operand<XLenVT> {
 
 // Standalone (codegen-only) immleaf patterns.
 
-// A 12-bit signed immediate plus one where the imm range will be -2047~2048.
+// A 12-bit signed immediate plus one where the imm range will be [-2047, 2048].
 def simm12_plus1 : ImmLeaf<XLenVT,
   [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
 
+// A 12-bit signed immediate sub one and exclude zero
+def simm12_sub1_nonzero : PatLeaf<(imm), [{
+  if (!N->hasOneUse())
+    return false;
+  // The immediate operand must be in range [-2049, 0) or (0, 2046].
+  int64_t Imm = N->getSExtValue();
+  return (Imm >= -2049 && Imm < 0) || (Imm > 0 && Imm <= 2046);
+}]>;
+
 // A 6-bit constant greater than 32.
 def uimm6gt32 : ImmLeaf<XLenVT, [{
   return isUInt<6>(Imm) && Imm > 32;
@@ -373,9 +382,9 @@ def ImmSub32 : SDNodeXForm<imm, [{
                                    N->getValueType(0));
 }]>;
 
-// Return an immediate value plus 32.
-def ImmPlus32 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getSExtValue() + 32, SDLoc(N),
+// Return an immediate value plus 1.
+def ImmPlus1 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() + 1, SDLoc(N),
                                    N->getValueType(0));
 }]>;
 
@@ -1208,6 +1217,10 @@ def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
 def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
 def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
 def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(setgt GPR:$rs1, simm12_sub1_nonzero:$imm),
+          (XORI (SLTI GPR:$rs1, (ImmPlus1 simm12_sub1_nonzero:$imm)), 1)>;
+def : Pat<(setugt GPR:$rs1, simm12_sub1_nonzero:$imm),
+          (XORI (SLTIU GPR:$rs1, (ImmPlus1 simm12_sub1_nonzero:$imm)), 1)>;
 
 def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();

diff  --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index 2af4ee69ce222..254bd5c183bfa 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -109,8 +109,8 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -120,8 +120,8 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -494,8 +494,8 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -505,8 +505,8 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -770,8 +770,8 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -781,8 +781,8 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1103,8 +1103,8 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1114,8 +1114,8 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index 9b8fbc3473f41..3945fab8ca2a2 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -119,8 +119,8 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -130,8 +130,8 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -452,8 +452,8 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -463,8 +463,8 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
index 4b2efdf5ed0cf..64b57cedaf144 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -109,8 +109,8 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -120,8 +120,8 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -478,8 +478,8 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -489,8 +489,8 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -754,8 +754,8 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -765,8 +765,8 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1071,8 +1071,8 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1082,8 +1082,8 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
index 7464553ad325b..663e688d333fe 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -119,8 +119,8 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -130,8 +130,8 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -436,8 +436,8 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2 at plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -447,8 +447,8 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2 at plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/i32-icmp.ll b/llvm/test/CodeGen/RISCV/i32-icmp.ll
index 6a11d24902d61..9b543f103e56a 100644
--- a/llvm/test/CodeGen/RISCV/i32-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i32-icmp.ll
@@ -136,6 +136,63 @@ define i32 @icmp_ugt(i32 %a, i32 %b) nounwind {
   ret i32 %2
 }
 
+define i32 @icmp_ugt_constant_zero(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_zero:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    ret
+  %1 = icmp ugt i32 %a, 0
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_2047(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_2047:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 2047
+; RV32I-NEXT:    sltu a0, a1, a0
+; RV32I-NEXT:    ret
+  %1 = icmp ugt i32 %a, 2047
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_2046(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_2046:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, 2047
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp ugt i32 %a, 2046
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_neg_2049(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_neg_2049:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, -2048
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+; 4294965247 signed extend is -2049
+  %1 = icmp ugt i32 %a, 4294965247
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_neg_2050(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_neg_2050:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 1048575
+; RV32I-NEXT:    addi a1, a1, 2046
+; RV32I-NEXT:    sltu a0, a1, a0
+; RV32I-NEXT:    ret
+; 4294965246 signed extend is -2050
+  %1 = icmp ugt i32 %a, 4294965246
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
 define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_uge:
 ; RV32I:       # %bb.0:
@@ -178,6 +235,72 @@ define i32 @icmp_sgt(i32 %a, i32 %b) nounwind {
   ret i32 %2
 }
 
+define i32 @icmp_sgt_constant(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slti a0, a0, 6
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, 5
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_zero(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_zero:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sgtz a0, a0
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, 0
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_2046(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_2046:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slti a0, a0, 2047
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, 2046
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_2047(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_2047:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 2047
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, 2047
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_neg_2049(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_neg_2049:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slti a0, a0, -2048
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, -2049
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_neg_2050(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_neg_2050:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 1048575
+; RV32I-NEXT:    addi a1, a1, 2046
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, -2050
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
 define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sge:
 ; RV32I:       # %bb.0:

diff  --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
index 2e38ed1aa3759..61dfef8f55eec 100644
--- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll
+++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
@@ -48,8 +48,8 @@ define i64 @selecti64i64(i64 %a) {
 define i32 @selecti64i32(i64 %a) {
 ; RV32-LABEL: selecti64i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a0, -1
-; RV32-NEXT:    slt a0, a0, a1
+; RV32-NEXT:    slti a0, a1, 0
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:    sub a0, a1, a0
 ; RV32-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
index 56f5f5a6c216d..9b5c1eaf81977 100644
--- a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
@@ -106,8 +106,8 @@ define i32 @pos_sel_special_constant(i32 signext %a) {
 ;
 ; RV64-LABEL: pos_sel_special_constant:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    li a1, -1
-; RV64-NEXT:    slt a0, a1, a0
+; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    slli a0, a0, 9
 ; RV64-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1

diff  --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index c7e93bea08b9b..5116f192b928e 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -195,8 +195,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    andi a0, a0, 15
-; RV32-NEXT:    li a1, 3
-; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 4
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_urem_odd_setne:
@@ -205,8 +205,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64-NEXT:    addw a0, a1, a0
 ; RV64-NEXT:    negw a0, a0
 ; RV64-NEXT:    andi a0, a0, 15
-; RV64-NEXT:    li a1, 3
-; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 4
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32M-LABEL: test_urem_odd_setne:
@@ -215,8 +215,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV32M-NEXT:    add a0, a1, a0
 ; RV32M-NEXT:    neg a0, a0
 ; RV32M-NEXT:    andi a0, a0, 15
-; RV32M-NEXT:    li a1, 3
-; RV32M-NEXT:    sltu a0, a1, a0
+; RV32M-NEXT:    sltiu a0, a0, 4
+; RV32M-NEXT:    xori a0, a0, 1
 ; RV32M-NEXT:    ret
 ;
 ; RV64M-LABEL: test_urem_odd_setne:
@@ -225,8 +225,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64M-NEXT:    addw a0, a1, a0
 ; RV64M-NEXT:    negw a0, a0
 ; RV64M-NEXT:    andi a0, a0, 15
-; RV64M-NEXT:    li a1, 3
-; RV64M-NEXT:    sltu a0, a1, a0
+; RV64M-NEXT:    sltiu a0, a0, 4
+; RV64M-NEXT:    xori a0, a0, 1
 ; RV64M-NEXT:    ret
 ;
 ; RV32MV-LABEL: test_urem_odd_setne:
@@ -235,8 +235,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV32MV-NEXT:    add a0, a1, a0
 ; RV32MV-NEXT:    neg a0, a0
 ; RV32MV-NEXT:    andi a0, a0, 15
-; RV32MV-NEXT:    li a1, 3
-; RV32MV-NEXT:    sltu a0, a1, a0
+; RV32MV-NEXT:    sltiu a0, a0, 4
+; RV32MV-NEXT:    xori a0, a0, 1
 ; RV32MV-NEXT:    ret
 ;
 ; RV64MV-LABEL: test_urem_odd_setne:
@@ -245,8 +245,8 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64MV-NEXT:    addw a0, a1, a0
 ; RV64MV-NEXT:    negw a0, a0
 ; RV64MV-NEXT:    andi a0, a0, 15
-; RV64MV-NEXT:    li a1, 3
-; RV64MV-NEXT:    sltu a0, a1, a0
+; RV64MV-NEXT:    sltiu a0, a0, 4
+; RV64MV-NEXT:    xori a0, a0, 1
 ; RV64MV-NEXT:    ret
   %urem = urem i4 %X, 5
   %cmp = icmp ne i4 %urem, 0
@@ -261,8 +261,8 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV32-NEXT:    li a1, 307
 ; RV32-NEXT:    call __mulsi3 at plt
 ; RV32-NEXT:    andi a0, a0, 511
-; RV32-NEXT:    li a1, 1
-; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 2
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -274,8 +274,8 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64-NEXT:    li a1, 307
 ; RV64-NEXT:    call __muldi3 at plt
 ; RV64-NEXT:    andi a0, a0, 511
-; RV64-NEXT:    li a1, 1
-; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 2
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -285,8 +285,8 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV32M-NEXT:    li a1, 307
 ; RV32M-NEXT:    mul a0, a0, a1
 ; RV32M-NEXT:    andi a0, a0, 511
-; RV32M-NEXT:    li a1, 1
-; RV32M-NEXT:    sltu a0, a1, a0
+; RV32M-NEXT:    sltiu a0, a0, 2
+; RV32M-NEXT:    xori a0, a0, 1
 ; RV32M-NEXT:    ret
 ;
 ; RV64M-LABEL: test_urem_negative_odd:
@@ -294,8 +294,8 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64M-NEXT:    li a1, 307
 ; RV64M-NEXT:    mulw a0, a0, a1
 ; RV64M-NEXT:    andi a0, a0, 511
-; RV64M-NEXT:    li a1, 1
-; RV64M-NEXT:    sltu a0, a1, a0
+; RV64M-NEXT:    sltiu a0, a0, 2
+; RV64M-NEXT:    xori a0, a0, 1
 ; RV64M-NEXT:    ret
 ;
 ; RV32MV-LABEL: test_urem_negative_odd:
@@ -303,8 +303,8 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV32MV-NEXT:    li a1, 307
 ; RV32MV-NEXT:    mul a0, a0, a1
 ; RV32MV-NEXT:    andi a0, a0, 511
-; RV32MV-NEXT:    li a1, 1
-; RV32MV-NEXT:    sltu a0, a1, a0
+; RV32MV-NEXT:    sltiu a0, a0, 2
+; RV32MV-NEXT:    xori a0, a0, 1
 ; RV32MV-NEXT:    ret
 ;
 ; RV64MV-LABEL: test_urem_negative_odd:
@@ -312,8 +312,8 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64MV-NEXT:    li a1, 307
 ; RV64MV-NEXT:    mulw a0, a0, a1
 ; RV64MV-NEXT:    andi a0, a0, 511
-; RV64MV-NEXT:    li a1, 1
-; RV64MV-NEXT:    sltu a0, a1, a0
+; RV64MV-NEXT:    sltiu a0, a0, 2
+; RV64MV-NEXT:    xori a0, a0, 1
 ; RV64MV-NEXT:    ret
   %urem = urem i9 %X, -5
   %cmp = icmp ne i9 %urem, 0
@@ -344,8 +344,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32-NEXT:    srli a0, a0, 22
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    li a1, 341
-; RV32-NEXT:    sltu s3, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 342
+; RV32-NEXT:    xori s3, a0, 1
 ; RV32-NEXT:    li a1, 819
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    call __mulsi3 at plt
@@ -358,8 +358,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32-NEXT:    call __mulsi3 at plt
 ; RV32-NEXT:    addi a0, a0, -1463
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    li a1, 292
-; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 293
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    neg a1, s3
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    neg a2, s1
@@ -404,8 +404,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    srli a0, a0, 54
 ; RV64-NEXT:    or a0, a0, a1
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    li a1, 341
-; RV64-NEXT:    sltu s3, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 342
+; RV64-NEXT:    xori s3, a0, 1
 ; RV64-NEXT:    li a1, 819
 ; RV64-NEXT:    mv a0, s2
 ; RV64-NEXT:    call __muldi3 at plt
@@ -418,8 +418,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    call __muldi3 at plt
 ; RV64-NEXT:    addiw a0, a0, -1463
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    li a1, 292
-; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 293
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    negw a1, s3
 ; RV64-NEXT:    negw a0, a0
 ; RV64-NEXT:    andi a1, a1, 2047
@@ -456,8 +456,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32M-NEXT:    srli a2, a2, 22
 ; RV32M-NEXT:    or a2, a2, a4
 ; RV32M-NEXT:    andi a2, a2, 2047
-; RV32M-NEXT:    li a4, 341
-; RV32M-NEXT:    sltu a2, a4, a2
+; RV32M-NEXT:    sltiu a2, a2, 342
+; RV32M-NEXT:    xori a2, a2, 1
 ; RV32M-NEXT:    li a4, 819
 ; RV32M-NEXT:    mul a1, a1, a4
 ; RV32M-NEXT:    addi a1, a1, -1638
@@ -468,8 +468,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32M-NEXT:    mul a3, a3, a4
 ; RV32M-NEXT:    addi a3, a3, -1463
 ; RV32M-NEXT:    andi a3, a3, 2047
-; RV32M-NEXT:    li a4, 292
-; RV32M-NEXT:    sltu a3, a4, a3
+; RV32M-NEXT:    sltiu a3, a3, 293
+; RV32M-NEXT:    xori a3, a3, 1
 ; RV32M-NEXT:    neg a2, a2
 ; RV32M-NEXT:    neg a3, a3
 ; RV32M-NEXT:    neg a4, a1
@@ -501,8 +501,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64M-NEXT:    srli a1, a1, 54
 ; RV64M-NEXT:    or a1, a1, a4
 ; RV64M-NEXT:    andi a1, a1, 2047
-; RV64M-NEXT:    li a4, 341
-; RV64M-NEXT:    sltu a1, a4, a1
+; RV64M-NEXT:    sltiu a1, a1, 342
+; RV64M-NEXT:    xori a1, a1, 1
 ; RV64M-NEXT:    li a4, 819
 ; RV64M-NEXT:    mulw a3, a3, a4
 ; RV64M-NEXT:    addiw a3, a3, -1638
@@ -513,8 +513,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64M-NEXT:    mulw a2, a2, a4
 ; RV64M-NEXT:    addiw a2, a2, -1463
 ; RV64M-NEXT:    andi a2, a2, 2047
-; RV64M-NEXT:    li a4, 292
-; RV64M-NEXT:    sltu a2, a4, a2
+; RV64M-NEXT:    sltiu a2, a2, 293
+; RV64M-NEXT:    xori a2, a2, 1
 ; RV64M-NEXT:    negw a1, a1
 ; RV64M-NEXT:    negw a2, a2
 ; RV64M-NEXT:    andi a1, a1, 2047

diff  --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index 7342cb56827c2..8e5cb87d1df80 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1984,8 +1984,8 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    li a1, -1
-; RV32-NEXT:    slt a0, a1, a0
+; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub.not.i64:
@@ -2005,8 +2005,8 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a0, a1, a0
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    li a1, -1
-; RV32ZBA-NEXT:    slt a0, a1, a0
+; RV32ZBA-NEXT:    slti a0, a0, 0
+; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: ssub.not.i64:


        


More information about the llvm-commits mailing list