[llvm] b5a18de - [RISCV] Remove C!=0 restriction from (sub C, (setcc x, y, eq/neq)) -> (add C-1, (setcc x, y, neq/eq)).

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 16 14:55:29 PDT 2022


Author: Craig Topper
Date: 2022-08-16T14:49:52-07:00
New Revision: b5a18de65169b4aa54db0b0ebc3dcc3462570600

URL: https://github.com/llvm/llvm-project/commit/b5a18de65169b4aa54db0b0ebc3dcc3462570600
DIFF: https://github.com/llvm/llvm-project/commit/b5a18de65169b4aa54db0b0ebc3dcc3462570600.diff

LOG: [RISCV] Remove C!=0 restriction from (sub C, (setcc x, y, eq/neq)) -> (add C-1, (setcc x, y, neq/eq)).

While (sub 0, X) can use x0 for the 0, I believe (add X, -1) is
still preferable. (addi X, -1) can be compressed; a sub with x0 on
the LHS is never compressible.
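
To make the tradeoff concrete, here is a minimal sketch (not part of the
patch; register choices are hypothetical) of the codegen difference for a
-1/0 result from an equality compare, essentially what the
select_eq_zero_negone test below exercises:

  define signext i32 @sext_icmp_eq(i32 signext %a, i32 signext %b) {
    %c = icmp eq i32 %a, %b
    %r = sext i1 %c to i32   ; -1 if equal, 0 otherwise, i.e. (sub 0, (setcc eq))
    ret i32 %r
  }

  Before:                     After:
    xor  a0, a0, a1             xor  a0, a0, a1
    seqz a0, a0                 snez a0, a0      ; inverted condition
    neg  a0, a0                 addi a0, a0, -1  ; compressible c.addi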

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
    llvm/test/CodeGen/RISCV/select-const.ll
    llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1311d7bd58a8b..fc9e8d4a57786 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8284,15 +8284,14 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
   SDValue N1 = N->getOperand(1);
 
   // Prefer to make this 'add 0/1' rather than 'sub 0/1'
-  // sub constant(!0), 0/1 -> add constant - 1, 1/0
-  // NODE: constant == 0, No redundant instructions are generated.
+  // sub constant, 0/1 -> add constant - 1, 1/0
   // (sub constant, (setcc x, y, eq/neq)) ->
   // (add (setcc x, y, neq/eq), constant - 1)
   auto *N0C = dyn_cast<ConstantSDNode>(N0);
   if (N0C && N1.getOpcode() == ISD::SETCC && N1.hasOneUse()) {
     ISD::CondCode CCVal = cast<CondCodeSDNode>(N1.getOperand(2))->get();
     EVT SetCCOpVT = N1.getOperand(0).getValueType();
-    if (!N0C->isZero() && SetCCOpVT.isInteger() && isIntEqualitySetCC(CCVal)) {
+    if (SetCCOpVT.isInteger() && isIntEqualitySetCC(CCVal)) {
       EVT VT = N->getValueType(0);
       APInt ImmValMinus1 = N0C->getAPIntValue() - 1;
       // If this doesn't form ADDI, the transform won't save any instructions

diff --git a/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll b/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
index 21923f0769080..f1ad9b3127db3 100644
--- a/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
+++ b/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
@@ -5,22 +5,22 @@
 define void @getSetCCResultType(<4 x i32>* %p, <4 x i32>* %q) nounwind {
 ; RV32I-LABEL: getSetCCResultType:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    lw a1, 12(a0)
-; RV32I-NEXT:    lw a2, 8(a0)
+; RV32I-NEXT:    lw a1, 0(a0)
+; RV32I-NEXT:    lw a2, 12(a0)
 ; RV32I-NEXT:    lw a3, 4(a0)
-; RV32I-NEXT:    lw a4, 0(a0)
-; RV32I-NEXT:    seqz a1, a1
-; RV32I-NEXT:    seqz a2, a2
-; RV32I-NEXT:    seqz a3, a3
-; RV32I-NEXT:    seqz a4, a4
-; RV32I-NEXT:    neg a4, a4
-; RV32I-NEXT:    neg a3, a3
-; RV32I-NEXT:    neg a2, a2
-; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    sw a1, 12(a0)
-; RV32I-NEXT:    sw a2, 8(a0)
+; RV32I-NEXT:    lw a4, 8(a0)
+; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    snez a3, a3
+; RV32I-NEXT:    addi a3, a3, -1
+; RV32I-NEXT:    snez a4, a4
+; RV32I-NEXT:    addi a4, a4, -1
+; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    sw a4, 8(a0)
 ; RV32I-NEXT:    sw a3, 4(a0)
-; RV32I-NEXT:    sw a4, 0(a0)
+; RV32I-NEXT:    sw a1, 0(a0)
 ; RV32I-NEXT:    ret
 entry:
   %0 = load <4 x i32>, <4 x i32>* %p, align 16

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
index fd84c0211fa6e..27aae6f5b089e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
@@ -123,8 +123,8 @@ define signext i1 @vreduce_or_v2i1(<2 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> %v)
   ret i1 %red
@@ -152,8 +152,8 @@ define signext i1 @vreduce_and_v2i1(<2 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> %v)
   ret i1 %red
@@ -166,8 +166,8 @@ define signext i1 @vreduce_umax_v2i1(<2 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.v2i1(<2 x i1> %v)
   ret i1 %red
@@ -181,8 +181,8 @@ define signext i1 @vreduce_smax_v2i1(<2 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.v2i1(<2 x i1> %v)
   ret i1 %red
@@ -196,8 +196,8 @@ define signext i1 @vreduce_umin_v2i1(<2 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.v2i1(<2 x i1> %v)
   ret i1 %red
@@ -210,8 +210,8 @@ define signext i1 @vreduce_smin_v2i1(<2 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.v2i1(<2 x i1> %v)
   ret i1 %red
@@ -224,8 +224,8 @@ define signext i1 @vreduce_or_v4i1(<4 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %v)
   ret i1 %red
@@ -253,8 +253,8 @@ define signext i1 @vreduce_and_v4i1(<4 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %v)
   ret i1 %red
@@ -267,8 +267,8 @@ define signext i1 @vreduce_umax_v4i1(<4 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.v4i1(<4 x i1> %v)
   ret i1 %red
@@ -282,8 +282,8 @@ define signext i1 @vreduce_smax_v4i1(<4 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.v4i1(<4 x i1> %v)
   ret i1 %red
@@ -297,8 +297,8 @@ define signext i1 @vreduce_umin_v4i1(<4 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.v4i1(<4 x i1> %v)
   ret i1 %red
@@ -311,8 +311,8 @@ define signext i1 @vreduce_smin_v4i1(<4 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.v4i1(<4 x i1> %v)
   ret i1 %red
@@ -325,8 +325,8 @@ define signext i1 @vreduce_or_v8i1(<8 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> %v)
   ret i1 %red
@@ -354,8 +354,8 @@ define signext i1 @vreduce_and_v8i1(<8 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %v)
   ret i1 %red
@@ -368,8 +368,8 @@ define signext i1 @vreduce_umax_v8i1(<8 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.v8i1(<8 x i1> %v)
   ret i1 %red
@@ -383,8 +383,8 @@ define signext i1 @vreduce_smax_v8i1(<8 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.v8i1(<8 x i1> %v)
   ret i1 %red
@@ -398,8 +398,8 @@ define signext i1 @vreduce_umin_v8i1(<8 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.v8i1(<8 x i1> %v)
   ret i1 %red
@@ -412,8 +412,8 @@ define signext i1 @vreduce_smin_v8i1(<8 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.v8i1(<8 x i1> %v)
   ret i1 %red
@@ -426,8 +426,8 @@ define signext i1 @vreduce_or_v16i1(<16 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> %v)
   ret i1 %red
@@ -455,8 +455,8 @@ define signext i1 @vreduce_and_v16i1(<16 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> %v)
   ret i1 %red
@@ -469,8 +469,8 @@ define signext i1 @vreduce_umax_v16i1(<16 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.v16i1(<16 x i1> %v)
   ret i1 %red
@@ -484,8 +484,8 @@ define signext i1 @vreduce_smax_v16i1(<16 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.v16i1(<16 x i1> %v)
   ret i1 %red
@@ -499,8 +499,8 @@ define signext i1 @vreduce_umin_v16i1(<16 x i1> %v) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.v16i1(<16 x i1> %v)
   ret i1 %red
@@ -513,8 +513,8 @@ define signext i1 @vreduce_smin_v16i1(<16 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.v16i1(<16 x i1> %v)
   ret i1 %red
@@ -528,8 +528,8 @@ define signext i1 @vreduce_or_v32i1(<32 x i1> %v) {
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vmor.mm v8, v0, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    snez a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    seqz a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_or_v32i1:
@@ -537,8 +537,8 @@ define signext i1 @vreduce_or_v32i1(<32 x i1> %v) {
 ; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
-; LMULMAX8-NEXT:    snez a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    seqz a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.v32i1(<32 x i1> %v)
   ret i1 %red
@@ -576,8 +576,8 @@ define signext i1 @vreduce_and_v32i1(<32 x i1> %v) {
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vmnand.mm v8, v0, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    seqz a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    snez a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_and_v32i1:
@@ -586,8 +586,8 @@ define signext i1 @vreduce_and_v32i1(<32 x i1> %v) {
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
-; LMULMAX8-NEXT:    seqz a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    snez a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.v32i1(<32 x i1> %v)
   ret i1 %red
@@ -601,8 +601,8 @@ define signext i1 @vreduce_umax_v32i1(<32 x i1> %v) {
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vmor.mm v8, v0, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    snez a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    seqz a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_umax_v32i1:
@@ -610,8 +610,8 @@ define signext i1 @vreduce_umax_v32i1(<32 x i1> %v) {
 ; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
-; LMULMAX8-NEXT:    snez a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    seqz a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.v32i1(<32 x i1> %v)
   ret i1 %red
@@ -625,8 +625,8 @@ define signext i1 @vreduce_smax_v32i1(<32 x i1> %v) {
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vmnand.mm v8, v0, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    seqz a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    snez a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_smax_v32i1:
@@ -635,8 +635,8 @@ define signext i1 @vreduce_smax_v32i1(<32 x i1> %v) {
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
-; LMULMAX8-NEXT:    seqz a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    snez a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.v32i1(<32 x i1> %v)
   ret i1 %red
@@ -650,8 +650,8 @@ define signext i1 @vreduce_umin_v32i1(<32 x i1> %v) {
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vmnand.mm v8, v0, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    seqz a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    snez a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_umin_v32i1:
@@ -660,8 +660,8 @@ define signext i1 @vreduce_umin_v32i1(<32 x i1> %v) {
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
-; LMULMAX8-NEXT:    seqz a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    snez a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.v32i1(<32 x i1> %v)
   ret i1 %red
@@ -675,8 +675,8 @@ define signext i1 @vreduce_smin_v32i1(<32 x i1> %v) {
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vmor.mm v8, v0, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    snez a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    seqz a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_smin_v32i1:
@@ -684,8 +684,8 @@ define signext i1 @vreduce_smin_v32i1(<32 x i1> %v) {
 ; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
-; LMULMAX8-NEXT:    snez a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    seqz a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.v32i1(<32 x i1> %v)
   ret i1 %red
@@ -701,8 +701,8 @@ define signext i1 @vreduce_or_v64i1(<64 x i1> %v) {
 ; LMULMAX1-NEXT:    vmor.mm v9, v0, v9
 ; LMULMAX1-NEXT:    vmor.mm v8, v9, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    snez a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    seqz a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_or_v64i1:
@@ -710,8 +710,8 @@ define signext i1 @vreduce_or_v64i1(<64 x i1> %v) {
 ; LMULMAX8-NEXT:    li a0, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
-; LMULMAX8-NEXT:    snez a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    seqz a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.v64i1(<64 x i1> %v)
   ret i1 %red
@@ -753,8 +753,8 @@ define signext i1 @vreduce_and_v64i1(<64 x i1> %v) {
 ; LMULMAX1-NEXT:    vmand.mm v9, v0, v9
 ; LMULMAX1-NEXT:    vmnand.mm v8, v9, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    seqz a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    snez a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_and_v64i1:
@@ -763,8 +763,8 @@ define signext i1 @vreduce_and_v64i1(<64 x i1> %v) {
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
-; LMULMAX8-NEXT:    seqz a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    snez a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.v64i1(<64 x i1> %v)
   ret i1 %red
@@ -780,8 +780,8 @@ define signext i1 @vreduce_umax_v64i1(<64 x i1> %v) {
 ; LMULMAX1-NEXT:    vmor.mm v9, v0, v9
 ; LMULMAX1-NEXT:    vmor.mm v8, v9, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    snez a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    seqz a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_umax_v64i1:
@@ -789,8 +789,8 @@ define signext i1 @vreduce_umax_v64i1(<64 x i1> %v) {
 ; LMULMAX8-NEXT:    li a0, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
-; LMULMAX8-NEXT:    snez a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    seqz a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.v64i1(<64 x i1> %v)
   ret i1 %red
@@ -806,8 +806,8 @@ define signext i1 @vreduce_smax_v64i1(<64 x i1> %v) {
 ; LMULMAX1-NEXT:    vmand.mm v9, v0, v9
 ; LMULMAX1-NEXT:    vmnand.mm v8, v9, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    seqz a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    snez a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_smax_v64i1:
@@ -816,8 +816,8 @@ define signext i1 @vreduce_smax_v64i1(<64 x i1> %v) {
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
-; LMULMAX8-NEXT:    seqz a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    snez a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.v64i1(<64 x i1> %v)
   ret i1 %red
@@ -833,8 +833,8 @@ define signext i1 @vreduce_umin_v64i1(<64 x i1> %v) {
 ; LMULMAX1-NEXT:    vmand.mm v9, v0, v9
 ; LMULMAX1-NEXT:    vmnand.mm v8, v9, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    seqz a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    snez a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_umin_v64i1:
@@ -843,8 +843,8 @@ define signext i1 @vreduce_umin_v64i1(<64 x i1> %v) {
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
-; LMULMAX8-NEXT:    seqz a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    snez a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.v64i1(<64 x i1> %v)
   ret i1 %red
@@ -860,8 +860,8 @@ define signext i1 @vreduce_smin_v64i1(<64 x i1> %v) {
 ; LMULMAX1-NEXT:    vmor.mm v9, v0, v9
 ; LMULMAX1-NEXT:    vmor.mm v8, v9, v8
 ; LMULMAX1-NEXT:    vcpop.m a0, v8
-; LMULMAX1-NEXT:    snez a0, a0
-; LMULMAX1-NEXT:    neg a0, a0
+; LMULMAX1-NEXT:    seqz a0, a0
+; LMULMAX1-NEXT:    addi a0, a0, -1
 ; LMULMAX1-NEXT:    ret
 ;
 ; LMULMAX8-LABEL: vreduce_smin_v64i1:
@@ -869,8 +869,8 @@ define signext i1 @vreduce_smin_v64i1(<64 x i1> %v) {
 ; LMULMAX8-NEXT:    li a0, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
-; LMULMAX8-NEXT:    snez a0, a0
-; LMULMAX8-NEXT:    neg a0, a0
+; LMULMAX8-NEXT:    seqz a0, a0
+; LMULMAX8-NEXT:    addi a0, a0, -1
 ; LMULMAX8-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.v64i1(<64 x i1> %v)
   ret i1 %red

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
index 184aa33e9853b..9cb380e706dc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
@@ -9,8 +9,8 @@ define signext i1 @vreduce_or_nxv1i1(<vscale x 1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
@@ -38,8 +38,8 @@ define signext i1 @vreduce_and_nxv1i1(<vscale x 1 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
@@ -52,8 +52,8 @@ define signext i1 @vreduce_umax_nxv1i1(<vscale x 1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
@@ -67,8 +67,8 @@ define signext i1 @vreduce_smax_nxv1i1(<vscale x 1 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
@@ -82,8 +82,8 @@ define signext i1 @vreduce_umin_nxv1i1(<vscale x 1 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
@@ -96,8 +96,8 @@ define signext i1 @vreduce_smin_nxv1i1(<vscale x 1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.nxv1i1(<vscale x 1 x i1> %v)
   ret i1 %red
@@ -110,8 +110,8 @@ define signext i1 @vreduce_or_nxv2i1(<vscale x 2 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
@@ -139,8 +139,8 @@ define signext i1 @vreduce_and_nxv2i1(<vscale x 2 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
@@ -153,8 +153,8 @@ define signext i1 @vreduce_umax_nxv2i1(<vscale x 2 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
@@ -168,8 +168,8 @@ define signext i1 @vreduce_smax_nxv2i1(<vscale x 2 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
@@ -183,8 +183,8 @@ define signext i1 @vreduce_umin_nxv2i1(<vscale x 2 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
@@ -197,8 +197,8 @@ define signext i1 @vreduce_smin_nxv2i1(<vscale x 2 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.nxv2i1(<vscale x 2 x i1> %v)
   ret i1 %red
@@ -211,8 +211,8 @@ define signext i1 @vreduce_or_nxv4i1(<vscale x 4 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
@@ -240,8 +240,8 @@ define signext i1 @vreduce_and_nxv4i1(<vscale x 4 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
@@ -254,8 +254,8 @@ define signext i1 @vreduce_umax_nxv4i1(<vscale x 4 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
@@ -269,8 +269,8 @@ define signext i1 @vreduce_smax_nxv4i1(<vscale x 4 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
@@ -284,8 +284,8 @@ define signext i1 @vreduce_umin_nxv4i1(<vscale x 4 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
@@ -298,8 +298,8 @@ define signext i1 @vreduce_smin_nxv4i1(<vscale x 4 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.nxv4i1(<vscale x 4 x i1> %v)
   ret i1 %red
@@ -312,8 +312,8 @@ define signext i1 @vreduce_or_nxv8i1(<vscale x 8 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
@@ -341,8 +341,8 @@ define signext i1 @vreduce_and_nxv8i1(<vscale x 8 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
@@ -355,8 +355,8 @@ define signext i1 @vreduce_umax_nxv8i1(<vscale x 8 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
@@ -370,8 +370,8 @@ define signext i1 @vreduce_smax_nxv8i1(<vscale x 8 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
@@ -385,8 +385,8 @@ define signext i1 @vreduce_umin_nxv8i1(<vscale x 8 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
@@ -399,8 +399,8 @@ define signext i1 @vreduce_smin_nxv8i1(<vscale x 8 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.nxv8i1(<vscale x 8 x i1> %v)
   ret i1 %red
@@ -413,8 +413,8 @@ define signext i1 @vreduce_or_nxv16i1(<vscale x 16 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
@@ -442,8 +442,8 @@ define signext i1 @vreduce_and_nxv16i1(<vscale x 16 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
@@ -456,8 +456,8 @@ define signext i1 @vreduce_umax_nxv16i1(<vscale x 16 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
@@ -471,8 +471,8 @@ define signext i1 @vreduce_smax_nxv16i1(<vscale x 16 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
@@ -486,8 +486,8 @@ define signext i1 @vreduce_umin_nxv16i1(<vscale x 16 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
@@ -500,8 +500,8 @@ define signext i1 @vreduce_smin_nxv16i1(<vscale x 16 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.nxv16i1(<vscale x 16 x i1> %v)
   ret i1 %red
@@ -514,8 +514,8 @@ define signext i1 @vreduce_or_nxv32i1(<vscale x 32 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
@@ -543,8 +543,8 @@ define signext i1 @vreduce_and_nxv32i1(<vscale x 32 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
@@ -557,8 +557,8 @@ define signext i1 @vreduce_umax_nxv32i1(<vscale x 32 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
@@ -572,8 +572,8 @@ define signext i1 @vreduce_smax_nxv32i1(<vscale x 32 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
@@ -587,8 +587,8 @@ define signext i1 @vreduce_umin_nxv32i1(<vscale x 32 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
@@ -601,8 +601,8 @@ define signext i1 @vreduce_smin_nxv32i1(<vscale x 32 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.nxv32i1(<vscale x 32 x i1> %v)
   ret i1 %red
@@ -615,8 +615,8 @@ define signext i1 @vreduce_or_nxv64i1(<vscale x 64 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red
@@ -644,8 +644,8 @@ define signext i1 @vreduce_and_nxv64i1(<vscale x 64 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red
@@ -658,8 +658,8 @@ define signext i1 @vreduce_umax_nxv64i1(<vscale x 64 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red
@@ -673,8 +673,8 @@ define signext i1 @vreduce_smax_nxv64i1(<vscale x 64 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red
@@ -688,8 +688,8 @@ define signext i1 @vreduce_umin_nxv64i1(<vscale x 64 x i1> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    snez a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red
@@ -702,8 +702,8 @@ define signext i1 @vreduce_smin_nxv64i1(<vscale x 64 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    snez a0, a0
-; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.nxv64i1(<vscale x 64 x i1> %v)
   ret i1 %red

diff --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll
index 071626e9c63fc..5397710bdeaf7 100644
--- a/llvm/test/CodeGen/RISCV/select-const.ll
+++ b/llvm/test/CodeGen/RISCV/select-const.ll
@@ -236,15 +236,15 @@ define signext i32 @select_eq_zero_negone(i32 signext %a, i32 signext %b) nounwi
 ; RV32-LABEL: select_eq_zero_negone:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    seqz a0, a0
-; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    addi a0, a0, -1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: select_eq_zero_negone:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    xor a0, a0, a1
-; RV64-NEXT:    seqz a0, a0
-; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    addi a0, a0, -1
 ; RV64-NEXT:    ret
   %1 = icmp eq i32 %a, %b
   %2 = select i1 %1, i32 -1, i32 0
@@ -255,15 +255,15 @@ define signext i32 @select_ne_zero_negone(i32 signext %a, i32 signext %b) nounwi
 ; RV32-LABEL: select_ne_zero_negone:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    xor a0, a0, a1
-; RV32-NEXT:    snez a0, a0
-; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    addi a0, a0, -1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: select_ne_zero_negone:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    xor a0, a0, a1
-; RV64-NEXT:    snez a0, a0
-; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    addi a0, a0, -1
 ; RV64-NEXT:    ret
   %1 = icmp ne i32 %a, %b
   %2 = select i1 %1, i32 -1, i32 0

diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
index 155ae902c628d..46d16c0a32500 100644
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -475,16 +475,16 @@ define i32 @sext_of_not_cmp_i32(i32 %x) {
 ; RV32I-LABEL: sext_of_not_cmp_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, -7
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_of_not_cmp_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a0, a0, -7
-; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    ret
   %cmp = icmp eq i32 %x, 7
   %xor = xor i1 %cmp, 1
@@ -497,16 +497,16 @@ define i64 @sext_of_not_cmp_i64(i64 %x) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    xori a0, a0, 7
 ; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_of_not_cmp_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a0, a0, -7
-; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    ret
   %cmp = icmp eq i64 %x, 7
   %xor = xor i1 %cmp, 1

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 7838d7dc44b2a..42c72e0ae32a9 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -347,27 +347,27 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32-NEXT:    call __moddi3@plt
 ; RV32-NEXT:    xori a2, s2, 2
 ; RV32-NEXT:    or a2, a2, s3
-; RV32-NEXT:    snez a2, a2
 ; RV32-NEXT:    xori a3, s5, 1
 ; RV32-NEXT:    or a3, a3, s6
-; RV32-NEXT:    snez a3, a3
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    snez a0, a0
-; RV32-NEXT:    neg a1, a3
-; RV32-NEXT:    neg a4, a2
-; RV32-NEXT:    neg a5, a0
-; RV32-NEXT:    sw a5, 0(s0)
-; RV32-NEXT:    andi a4, a4, 7
-; RV32-NEXT:    sb a4, 12(s0)
-; RV32-NEXT:    slli a3, a3, 1
-; RV32-NEXT:    sub a0, a0, a3
+; RV32-NEXT:    seqz a1, a3
+; RV32-NEXT:    addi a1, a1, -1
+; RV32-NEXT:    seqz a2, a2
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    neg a3, a0
+; RV32-NEXT:    sw a3, 0(s0)
+; RV32-NEXT:    andi a3, a2, 7
+; RV32-NEXT:    sb a3, 12(s0)
+; RV32-NEXT:    slli a3, a1, 1
+; RV32-NEXT:    or a0, a3, a0
 ; RV32-NEXT:    sw a0, 4(s0)
 ; RV32-NEXT:    srli a0, a1, 31
 ; RV32-NEXT:    andi a1, a1, 1
 ; RV32-NEXT:    slli a1, a1, 1
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    slli a1, a2, 2
-; RV32-NEXT:    sub a0, a0, a1
+; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    sw a0, 8(s0)
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -389,24 +389,24 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    lb a0, 12(a0)
-; RV64-NEXT:    lwu a1, 8(s0)
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    ld a2, 0(s0)
+; RV64-NEXT:    lwu a0, 8(a0)
+; RV64-NEXT:    ld a1, 0(s0)
+; RV64-NEXT:    slli a2, a0, 31
+; RV64-NEXT:    srli a3, a1, 33
+; RV64-NEXT:    lb a4, 12(s0)
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    slli a2, a2, 31
+; RV64-NEXT:    srai s1, a2, 31
+; RV64-NEXT:    slli a2, a4, 32
+; RV64-NEXT:    or a0, a0, a2
 ; RV64-NEXT:    slli a0, a0, 29
-; RV64-NEXT:    srai s1, a0, 31
-; RV64-NEXT:    slli a0, a1, 31
-; RV64-NEXT:    srli a1, a2, 33
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    srai a0, a0, 31
-; RV64-NEXT:    slli a1, a2, 31
+; RV64-NEXT:    slli a1, a1, 31
 ; RV64-NEXT:    srai s2, a1, 31
-; RV64-NEXT:    li a1, 7
+; RV64-NEXT:    li a1, -5
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    mv s3, a0
-; RV64-NEXT:    li a1, -5
+; RV64-NEXT:    li a1, 7
 ; RV64-NEXT:    mv a0, s1
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    mv s1, a0
@@ -421,25 +421,25 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    or a0, a0, a2
 ; RV64-NEXT:    sltu a0, a1, a0
-; RV64-NEXT:    addi a1, s1, -2
-; RV64-NEXT:    snez a1, a1
-; RV64-NEXT:    addi a2, s3, -1
-; RV64-NEXT:    snez a2, a2
 ; RV64-NEXT:    neg a0, a0
-; RV64-NEXT:    neg a3, a2
-; RV64-NEXT:    neg a4, a1
-; RV64-NEXT:    slli a4, a4, 29
-; RV64-NEXT:    srli a4, a4, 61
-; RV64-NEXT:    sb a4, 12(s0)
-; RV64-NEXT:    slliw a1, a1, 2
-; RV64-NEXT:    slli a3, a3, 31
+; RV64-NEXT:    addi a1, s1, -1
+; RV64-NEXT:    seqz a1, a1
+; RV64-NEXT:    addi a1, a1, -1
+; RV64-NEXT:    addi a2, s3, -2
+; RV64-NEXT:    seqz a2, a2
+; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    slli a3, a2, 29
+; RV64-NEXT:    srli a3, a3, 61
+; RV64-NEXT:    sb a3, 12(s0)
+; RV64-NEXT:    slli a2, a2, 2
+; RV64-NEXT:    slli a3, a1, 31
 ; RV64-NEXT:    srli a3, a3, 62
-; RV64-NEXT:    subw a1, a3, a1
-; RV64-NEXT:    sw a1, 8(s0)
+; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    sw a2, 8(s0)
 ; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    srli a0, a0, 31
-; RV64-NEXT:    slli a1, a2, 33
-; RV64-NEXT:    sub a0, a0, a1
+; RV64-NEXT:    slli a1, a1, 33
+; RV64-NEXT:    or a0, a0, a1
 ; RV64-NEXT:    sd a0, 0(s0)
 ; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -498,27 +498,27 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32M-NEXT:    call __moddi3@plt
 ; RV32M-NEXT:    xori a2, s2, 2
 ; RV32M-NEXT:    or a2, a2, s3
-; RV32M-NEXT:    snez a2, a2
 ; RV32M-NEXT:    xori a3, s5, 1
 ; RV32M-NEXT:    or a3, a3, s6
-; RV32M-NEXT:    snez a3, a3
 ; RV32M-NEXT:    or a0, a0, a1
 ; RV32M-NEXT:    snez a0, a0
-; RV32M-NEXT:    neg a1, a3
-; RV32M-NEXT:    neg a4, a2
-; RV32M-NEXT:    neg a5, a0
-; RV32M-NEXT:    sw a5, 0(s0)
-; RV32M-NEXT:    andi a4, a4, 7
-; RV32M-NEXT:    sb a4, 12(s0)
-; RV32M-NEXT:    slli a3, a3, 1
-; RV32M-NEXT:    sub a0, a0, a3
+; RV32M-NEXT:    seqz a1, a3
+; RV32M-NEXT:    addi a1, a1, -1
+; RV32M-NEXT:    seqz a2, a2
+; RV32M-NEXT:    addi a2, a2, -1
+; RV32M-NEXT:    neg a3, a0
+; RV32M-NEXT:    sw a3, 0(s0)
+; RV32M-NEXT:    andi a3, a2, 7
+; RV32M-NEXT:    sb a3, 12(s0)
+; RV32M-NEXT:    slli a3, a1, 1
+; RV32M-NEXT:    or a0, a3, a0
 ; RV32M-NEXT:    sw a0, 4(s0)
 ; RV32M-NEXT:    srli a0, a1, 31
 ; RV32M-NEXT:    andi a1, a1, 1
 ; RV32M-NEXT:    slli a1, a1, 1
 ; RV32M-NEXT:    or a0, a0, a1
 ; RV32M-NEXT:    slli a1, a2, 2
-; RV32M-NEXT:    sub a0, a0, a1
+; RV32M-NEXT:    or a0, a0, a1
 ; RV32M-NEXT:    sw a0, 8(s0)
 ; RV32M-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32M-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -533,67 +533,67 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ;
 ; RV64M-LABEL: test_srem_vec:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    lb a1, 12(a0)
-; RV64M-NEXT:    lwu a2, 8(a0)
-; RV64M-NEXT:    slli a1, a1, 32
-; RV64M-NEXT:    or a1, a2, a1
-; RV64M-NEXT:    ld a3, 0(a0)
-; RV64M-NEXT:    slli a1, a1, 29
-; RV64M-NEXT:    srai a1, a1, 31
-; RV64M-NEXT:    slli a2, a2, 31
-; RV64M-NEXT:    srli a4, a3, 33
+; RV64M-NEXT:    lwu a1, 8(a0)
+; RV64M-NEXT:    ld a2, 0(a0)
+; RV64M-NEXT:    lb a3, 12(a0)
+; RV64M-NEXT:    slli a4, a1, 31
+; RV64M-NEXT:    srli a5, a2, 33
+; RV64M-NEXT:    or a4, a5, a4
+; RV64M-NEXT:    slli a3, a3, 32
 ; RV64M-NEXT:    lui a5, %hi(.LCPI3_0)
 ; RV64M-NEXT:    ld a5, %lo(.LCPI3_0)(a5)
-; RV64M-NEXT:    or a2, a4, a2
-; RV64M-NEXT:    slli a2, a2, 31
-; RV64M-NEXT:    srai a2, a2, 31
-; RV64M-NEXT:    mulh a4, a2, a5
-; RV64M-NEXT:    srli a5, a4, 63
-; RV64M-NEXT:    srai a4, a4, 1
-; RV64M-NEXT:    add a4, a4, a5
-; RV64M-NEXT:    slli a5, a4, 3
-; RV64M-NEXT:    sub a4, a4, a5
+; RV64M-NEXT:    or a1, a1, a3
+; RV64M-NEXT:    slli a1, a1, 29
+; RV64M-NEXT:    srai a1, a1, 31
+; RV64M-NEXT:    mulh a3, a1, a5
+; RV64M-NEXT:    srli a5, a3, 63
+; RV64M-NEXT:    srai a3, a3, 1
+; RV64M-NEXT:    add a3, a3, a5
+; RV64M-NEXT:    slli a5, a3, 2
+; RV64M-NEXT:    add a3, a5, a3
 ; RV64M-NEXT:    lui a5, %hi(.LCPI3_1)
 ; RV64M-NEXT:    ld a5, %lo(.LCPI3_1)(a5)
-; RV64M-NEXT:    slli a3, a3, 31
-; RV64M-NEXT:    srai a3, a3, 31
-; RV64M-NEXT:    add a2, a2, a4
-; RV64M-NEXT:    mulh a4, a1, a5
-; RV64M-NEXT:    srli a5, a4, 63
-; RV64M-NEXT:    srai a4, a4, 1
-; RV64M-NEXT:    add a4, a4, a5
-; RV64M-NEXT:    slli a5, a4, 2
-; RV64M-NEXT:    add a4, a5, a4
-; RV64M-NEXT:    add a1, a1, a4
-; RV64M-NEXT:    addi a1, a1, -2
-; RV64M-NEXT:    snez a1, a1
+; RV64M-NEXT:    slli a4, a4, 31
+; RV64M-NEXT:    srai a4, a4, 31
+; RV64M-NEXT:    add a1, a1, a3
+; RV64M-NEXT:    mulh a3, a4, a5
+; RV64M-NEXT:    srli a5, a3, 63
+; RV64M-NEXT:    srai a3, a3, 1
+; RV64M-NEXT:    add a3, a3, a5
+; RV64M-NEXT:    slli a5, a3, 3
+; RV64M-NEXT:    sub a3, a3, a5
+; RV64M-NEXT:    add a3, a4, a3
 ; RV64M-NEXT:    lui a4, %hi(.LCPI3_2)
 ; RV64M-NEXT:    ld a4, %lo(.LCPI3_2)(a4)
 ; RV64M-NEXT:    lui a5, %hi(.LCPI3_3)
 ; RV64M-NEXT:    ld a5, %lo(.LCPI3_3)(a5)
-; RV64M-NEXT:    addi a2, a2, -1
-; RV64M-NEXT:    snez a2, a2
-; RV64M-NEXT:    mul a3, a3, a4
-; RV64M-NEXT:    add a3, a3, a5
-; RV64M-NEXT:    slli a4, a3, 63
-; RV64M-NEXT:    srli a3, a3, 1
-; RV64M-NEXT:    or a3, a3, a4
-; RV64M-NEXT:    sltu a3, a5, a3
-; RV64M-NEXT:    neg a4, a2
-; RV64M-NEXT:    neg a5, a1
-; RV64M-NEXT:    neg a3, a3
-; RV64M-NEXT:    slli a2, a2, 33
-; RV64M-NEXT:    slli a3, a3, 31
-; RV64M-NEXT:    srli a3, a3, 31
-; RV64M-NEXT:    sub a2, a3, a2
+; RV64M-NEXT:    slli a2, a2, 31
+; RV64M-NEXT:    srai a2, a2, 31
+; RV64M-NEXT:    mul a2, a2, a4
+; RV64M-NEXT:    add a2, a2, a5
+; RV64M-NEXT:    slli a4, a2, 63
+; RV64M-NEXT:    srli a2, a2, 1
+; RV64M-NEXT:    or a2, a2, a4
+; RV64M-NEXT:    sltu a2, a5, a2
+; RV64M-NEXT:    addi a3, a3, -1
+; RV64M-NEXT:    seqz a3, a3
+; RV64M-NEXT:    addi a3, a3, -1
+; RV64M-NEXT:    addi a1, a1, -2
+; RV64M-NEXT:    seqz a1, a1
+; RV64M-NEXT:    addi a1, a1, -1
+; RV64M-NEXT:    neg a2, a2
+; RV64M-NEXT:    slli a4, a1, 29
+; RV64M-NEXT:    srli a4, a4, 61
+; RV64M-NEXT:    sb a4, 12(a0)
+; RV64M-NEXT:    slli a4, a3, 33
+; RV64M-NEXT:    slli a2, a2, 31
+; RV64M-NEXT:    srli a2, a2, 31
+; RV64M-NEXT:    or a2, a2, a4
 ; RV64M-NEXT:    sd a2, 0(a0)
-; RV64M-NEXT:    slli a2, a5, 29
-; RV64M-NEXT:    srli a2, a2, 61
-; RV64M-NEXT:    sb a2, 12(a0)
-; RV64M-NEXT:    slliw a1, a1, 2
-; RV64M-NEXT:    slli a2, a4, 31
+; RV64M-NEXT:    slli a1, a1, 2
+; RV64M-NEXT:    slli a2, a3, 31
 ; RV64M-NEXT:    srli a2, a2, 62
-; RV64M-NEXT:    subw a1, a2, a1
+; RV64M-NEXT:    or a1, a2, a1
 ; RV64M-NEXT:    sw a1, 8(a0)
 ; RV64M-NEXT:    ret
 ;


        

