[llvm] d9683a7 - [RISCV] Fix extract_vector_elt on i1 at idx 0 being inverted

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Thu May 4 03:45:41 PDT 2023


Author: Luke Lau
Date: 2023-05-04T11:45:35+01:00
New Revision: d9683a70fef48cfaee2c83147a3b26f4f90162a2

URL: https://github.com/llvm/llvm-project/commit/d9683a70fef48cfaee2c83147a3b26f4f90162a2
DIFF: https://github.com/llvm/llvm-project/commit/d9683a70fef48cfaee2c83147a3b26f4f90162a2.diff

LOG: [RISCV] Fix extract_vector_elt on i1 at idx 0 being inverted

It looks like the intention here is to truncate an XLenVT -> i1, in
which case we should be emitting snez instead of seqz if I'm understanding
correctly.
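
As a rough sketch of the truncation in question (not from the patch itself;
the function name is illustrative and RV64's i64 XLen is assumed): truncating
an XLen-wide value to i1 keeps only the low bit, so for a value that is
already 0 or 1 it amounts to testing for non-zero, i.e. snez rather than seqz.

    ; Truncation to i1 keeps only bit 0 of %x. For a 0/1 boolean this is
    ; equivalent to "%x != 0" (snez), not "%x == 0" (seqz).
    define i1 @trunc_xlen_to_i1(i64 %x) {
      %bit = trunc i64 %x to i1
      ret i1 %bit
    }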

Reviewed By: jacquesguan, frasercrmck

Differential Revision: https://reviews.llvm.org/D149732

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
    llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 77f0d5ff17fa..12bd4dff367e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6165,7 +6165,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
       SDValue Vfirst =
           DAG.getNode(RISCVISD::VFIRST_VL, DL, XLenVT, Vec, Mask, VL);
       return DAG.getSetCC(DL, XLenVT, Vfirst, DAG.getConstant(0, DL, XLenVT),
-                          ISD::SETEQ);
+                          ISD::SETNE);
     }
     if (VecVT.isFixedLengthVector()) {
       unsigned NumElts = VecVT.getVectorNumElements();

diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
index 8089d0adedc9..7d8395439301 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -29,7 +29,7 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
 ; RV32-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    vfirst.m a1, v10
-; RV32-NEXT:    seqz a1, a1
+; RV32-NEXT:    snez a1, a1
 ; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a1
 ; RV32-NEXT:    vmsne.vi v0, v8, 0
@@ -51,7 +51,7 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
 ; RV64-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    vfirst.m a1, v12
-; RV64-NEXT:    seqz a1, a1
+; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; RV64-NEXT:    vmv.v.x v8, a1
 ; RV64-NEXT:    vmsne.vi v0, v8, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index ba8486780197..2210ed4720c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -217,7 +217,7 @@ define i1 @extractelt_nxv1i1_idx0(<vscale x 1 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
   %b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
@@ -232,7 +232,7 @@ define i1 @extractelt_nxv2i1_idx0(<vscale x 2 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
   %b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
@@ -247,7 +247,7 @@ define i1 @extractelt_nxv4i1_idx0(<vscale x 4 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
   %b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
@@ -262,7 +262,7 @@ define i1 @extractelt_nxv8i1_idx0(<vscale x 8 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
   %b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
@@ -277,7 +277,7 @@ define i1 @extractelt_nxv16i1_idx0(<vscale x 16 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v10, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v10
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
   %b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
@@ -292,7 +292,7 @@ define i1 @extractelt_nxv32i1_idx0(<vscale x 32 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmseq.vi v12, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v12
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
   %b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
@@ -307,7 +307,7 @@ define i1 @extractelt_nxv64i1_idx0(<vscale x 64 x i8>* %x) nounwind {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v16, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v16
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <vscale x 64 x i8>, <vscale x 64 x i8>* %x
   %b = icmp eq <vscale x 64 x i8> %a, zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
index 526aee382589..fcd44c94c056 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
@@ -20,7 +20,7 @@ define <2 x i1> @reverse_v2i1(<2 x i1> %a) {
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
@@ -45,7 +45,7 @@ define <4 x i1> @reverse_v4i1(<4 x i1> %a) {
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
@@ -82,7 +82,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
 ; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 31
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -115,7 +115,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
 ; RV32-BITS-256-NEXT:    srli a0, a0, 31
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT:    vfirst.m a0, v0
-; RV32-BITS-256-NEXT:    seqz a0, a0
+; RV32-BITS-256-NEXT:    snez a0, a0
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -148,7 +148,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
 ; RV32-BITS-512-NEXT:    srli a0, a0, 31
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT:    vfirst.m a0, v0
-; RV32-BITS-512-NEXT:    seqz a0, a0
+; RV32-BITS-512-NEXT:    snez a0, a0
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
@@ -181,7 +181,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
 ; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -214,7 +214,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
 ; RV64-BITS-256-NEXT:    srli a0, a0, 63
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vfirst.m a0, v0
-; RV64-BITS-256-NEXT:    seqz a0, a0
+; RV64-BITS-256-NEXT:    snez a0, a0
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -247,7 +247,7 @@ define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
 ; RV64-BITS-512-NEXT:    srli a0, a0, 63
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vfirst.m a0, v0
-; RV64-BITS-512-NEXT:    seqz a0, a0
+; RV64-BITS-512-NEXT:    snez a0, a0
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
@@ -308,7 +308,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
 ; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 31
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -365,7 +365,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
 ; RV32-BITS-256-NEXT:    srli a0, a0, 31
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT:    vfirst.m a0, v0
-; RV32-BITS-256-NEXT:    seqz a0, a0
+; RV32-BITS-256-NEXT:    snez a0, a0
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -422,7 +422,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
 ; RV32-BITS-512-NEXT:    srli a0, a0, 31
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT:    vfirst.m a0, v0
-; RV32-BITS-512-NEXT:    seqz a0, a0
+; RV32-BITS-512-NEXT:    snez a0, a0
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
@@ -479,7 +479,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
 ; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -536,7 +536,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
 ; RV64-BITS-256-NEXT:    srli a0, a0, 63
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vfirst.m a0, v0
-; RV64-BITS-256-NEXT:    seqz a0, a0
+; RV64-BITS-256-NEXT:    snez a0, a0
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -593,7 +593,7 @@ define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
 ; RV64-BITS-512-NEXT:    srli a0, a0, 63
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vfirst.m a0, v0
-; RV64-BITS-512-NEXT:    seqz a0, a0
+; RV64-BITS-512-NEXT:    snez a0, a0
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
@@ -702,7 +702,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
 ; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 31
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -807,7 +807,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
 ; RV32-BITS-256-NEXT:    srli a0, a0, 31
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT:    vfirst.m a0, v0
-; RV32-BITS-256-NEXT:    seqz a0, a0
+; RV32-BITS-256-NEXT:    snez a0, a0
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -912,7 +912,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
 ; RV32-BITS-512-NEXT:    srli a0, a0, 31
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT:    vfirst.m a0, v0
-; RV32-BITS-512-NEXT:    seqz a0, a0
+; RV32-BITS-512-NEXT:    snez a0, a0
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
@@ -1017,7 +1017,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
 ; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -1122,7 +1122,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
 ; RV64-BITS-256-NEXT:    srli a0, a0, 63
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vfirst.m a0, v0
-; RV64-BITS-256-NEXT:    seqz a0, a0
+; RV64-BITS-256-NEXT:    snez a0, a0
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -1227,7 +1227,7 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
 ; RV64-BITS-512-NEXT:    srli a0, a0, 63
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vfirst.m a0, v0
-; RV64-BITS-512-NEXT:    seqz a0, a0
+; RV64-BITS-512-NEXT:    snez a0, a0
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
@@ -1434,7 +1434,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
 ; RV32-BITS-UNKNOWN-NEXT:    srli a1, a1, 31
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -1637,7 +1637,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
 ; RV32-BITS-256-NEXT:    srli a1, a1, 31
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-BITS-256-NEXT:    vfirst.m a0, v0
-; RV32-BITS-256-NEXT:    seqz a0, a0
+; RV32-BITS-256-NEXT:    snez a0, a0
 ; RV32-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -1840,7 +1840,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
 ; RV32-BITS-512-NEXT:    srli a1, a1, 31
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-BITS-512-NEXT:    vfirst.m a0, v0
-; RV32-BITS-512-NEXT:    seqz a0, a0
+; RV32-BITS-512-NEXT:    snez a0, a0
 ; RV32-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
@@ -2040,7 +2040,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
 ; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT:    seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT:    snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
@@ -2240,7 +2240,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
 ; RV64-BITS-256-NEXT:    srli a0, a0, 63
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vfirst.m a0, v0
-; RV64-BITS-256-NEXT:    seqz a0, a0
+; RV64-BITS-256-NEXT:    snez a0, a0
 ; RV64-BITS-256-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
@@ -2440,7 +2440,7 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
 ; RV64-BITS-512-NEXT:    srli a0, a0, 63
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vfirst.m a0, v0
-; RV64-BITS-512-NEXT:    seqz a0, a0
+; RV64-BITS-512-NEXT:    snez a0, a0
 ; RV64-BITS-512-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT:    vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index 07656c5405f7..2ee0e44253d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -453,7 +453,7 @@ define i1 @extractelt_v1i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <1 x i8>, ptr %x
   %b = icmp eq <1 x i8> %a, zeroinitializer
@@ -468,7 +468,7 @@ define i1 @extractelt_v2i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = icmp eq <2 x i8> %a, zeroinitializer
@@ -483,7 +483,7 @@ define i1 @extractelt_v4i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, ptr %x
   %b = icmp eq <4 x i8> %a, zeroinitializer
@@ -498,7 +498,7 @@ define i1 @extractelt_v8i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, ptr %x
   %b = icmp eq <8 x i8> %a, zeroinitializer
@@ -513,7 +513,7 @@ define i1 @extractelt_v16i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v8, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v8
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
   %b = icmp eq <16 x i8> %a, zeroinitializer
@@ -529,7 +529,7 @@ define i1 @extractelt_v32i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v10, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v10
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, ptr %x
   %b = icmp eq <32 x i8> %a, zeroinitializer
@@ -545,7 +545,7 @@ define i1 @extractelt_v64i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v12, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v12
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, ptr %x
   %b = icmp eq <64 x i8> %a, zeroinitializer
@@ -561,7 +561,7 @@ define i1 @extractelt_v128i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v16, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v16
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, ptr %x
   %b = icmp eq <128 x i8> %a, zeroinitializer
@@ -577,7 +577,7 @@ define i1 @extractelt_v256i1_idx0(ptr %x) nounwind {
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v16, v8, 0
 ; CHECK-NEXT:    vfirst.m a0, v16
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %a = load <256 x i8>, ptr %x
   %b = icmp eq <256 x i8> %a, zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index a30afd02bdd5..946e7d2aaa3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -36,7 +36,7 @@ define <1 x i8> @mgather_v1i8(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i8> %passthru)
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB0_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB0_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vle8.v v8, (a0)
@@ -870,7 +870,7 @@ define <1 x i16> @mgather_v1i16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i16> %passthr
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB13_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB13_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vle16.v v8, (a0)
@@ -2067,7 +2067,7 @@ define <1 x i32> @mgather_v1i32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i32> %passthr
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB27_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB27_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vle32.v v8, (a0)
@@ -3607,7 +3607,7 @@ define <1 x i64> @mgather_v1i64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i64> %passthr
 ; RV32ZVE32F:       # %bb.0:
 ; RV32ZVE32F-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT:    vfirst.m a2, v0
-; RV32ZVE32F-NEXT:    bnez a2, .LBB42_2
+; RV32ZVE32F-NEXT:    beqz a2, .LBB42_2
 ; RV32ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
@@ -3620,7 +3620,7 @@ define <1 x i64> @mgather_v1i64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x i64> %passthr
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a2, v0
-; RV64ZVE32F-NEXT:    bnez a2, .LBB42_2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB42_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a1, 0(a0)
 ; RV64ZVE32F-NEXT:  .LBB42_2: # %else
@@ -7060,7 +7060,7 @@ define <1 x half> @mgather_v1f16(<1 x ptr> %ptrs, <1 x i1> %m, <1 x half> %passt
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB58_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB58_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vle16.v v8, (a0)
@@ -8023,7 +8023,7 @@ define <1 x float> @mgather_v1f32(<1 x ptr> %ptrs, <1 x i1> %m, <1 x float> %pas
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB68_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB68_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vle32.v v8, (a0)
@@ -9437,7 +9437,7 @@ define <1 x double> @mgather_v1f64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x double> %p
 ; RV32ZVE32F:       # %bb.0:
 ; RV32ZVE32F-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT:    vfirst.m a0, v0
-; RV32ZVE32F-NEXT:    bnez a0, .LBB81_2
+; RV32ZVE32F-NEXT:    beqz a0, .LBB81_2
 ; RV32ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
@@ -9449,7 +9449,7 @@ define <1 x double> @mgather_v1f64(<1 x ptr> %ptrs, <1 x i1> %m, <1 x double> %p
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB81_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB81_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT:    fld fa0, 0(a0)
 ; RV64ZVE32F-NEXT:  .LBB81_2: # %else

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 9b2e14d4bab8..1da6953cf28a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -33,7 +33,7 @@ define void @mscatter_v1i8(<1 x i8> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB0_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB0_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a0)
@@ -637,7 +637,7 @@ define void @mscatter_v1i16(<1 x i16> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB10_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB10_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
@@ -1593,7 +1593,7 @@ define void @mscatter_v1i32(<1 x i32> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB22_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB22_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
@@ -2887,7 +2887,7 @@ define void @mscatter_v1i64(<1 x i64> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV32ZVE32F:       # %bb.0:
 ; RV32ZVE32F-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT:    vfirst.m a2, v0
-; RV32ZVE32F-NEXT:    bnez a2, .LBB36_2
+; RV32ZVE32F-NEXT:    beqz a2, .LBB36_2
 ; RV32ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vmv.x.s a2, v8
@@ -2900,7 +2900,7 @@ define void @mscatter_v1i64(<1 x i64> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a2, v0
-; RV64ZVE32F-NEXT:    bnez a2, .LBB36_2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB36_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT:    sd a0, 0(a1)
 ; RV64ZVE32F-NEXT:  .LBB36_2: # %else
@@ -5974,7 +5974,7 @@ define void @mscatter_v1f16(<1 x half> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB52_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB52_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
@@ -6820,7 +6820,7 @@ define void @mscatter_v1f32(<1 x float> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB62_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB62_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
@@ -8060,7 +8060,7 @@ define void @mscatter_v1f64(<1 x double> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV32ZVE32F:       # %bb.0:
 ; RV32ZVE32F-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT:    vfirst.m a0, v0
-; RV32ZVE32F-NEXT:    bnez a0, .LBB75_2
+; RV32ZVE32F-NEXT:    beqz a0, .LBB75_2
 ; RV32ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
@@ -8072,7 +8072,7 @@ define void @mscatter_v1f64(<1 x double> %val, <1 x ptr> %ptrs, <1 x i1> %m) {
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vfirst.m a1, v0
-; RV64ZVE32F-NEXT:    bnez a1, .LBB75_2
+; RV64ZVE32F-NEXT:    beqz a1, .LBB75_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT:    fsd fa0, 0(a0)
 ; RV64ZVE32F-NEXT:  .LBB75_2: # %else

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
index 3814edeaa30c..0067b8df6a0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
@@ -209,7 +209,7 @@ define void @store_v6i1(ptr %p, <6 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vfirst.m a1, v0
-; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    snez a1, a1
 ; CHECK-NEXT:    vmv.x.s a2, v0
 ; CHECK-NEXT:    andi a3, a2, 2
 ; CHECK-NEXT:    or a1, a1, a3

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
index 9d7564d4dda5..53d12da198e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
@@ -11,7 +11,7 @@ define zeroext i1 @vreduce_or_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -24,7 +24,7 @@ define zeroext i1 @vreduce_xor_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -37,7 +37,7 @@ define zeroext i1 @vreduce_and_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -50,7 +50,7 @@ define zeroext i1 @vreduce_umax_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umax.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -63,7 +63,7 @@ define zeroext i1 @vreduce_smax_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smax.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -76,7 +76,7 @@ define zeroext i1 @vreduce_umin_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.umin.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -89,7 +89,7 @@ define zeroext i1 @vreduce_smin_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.smin.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -806,7 +806,7 @@ define zeroext i1 @vreduce_add_v1i1(<1 x i1> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    seqz a0, a0
+; CHECK-NEXT:    snez a0, a0
 ; CHECK-NEXT:    ret
   %red = call i1 @llvm.vector.reduce.add.v1i1(<1 x i1> %v)
   ret i1 %red

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index 67eb190e8cb3..968f18f49e49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -9,7 +9,7 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT:    vfirst.m a0, v0
-; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    snez a0, a0
 ; RV32-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v0
@@ -39,7 +39,7 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
 ; RV32-NEXT:    vslidedown.vi v9, v0, 2
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT:    vfirst.m a1, v9
-; RV32-NEXT:    seqz a1, a1
+; RV32-NEXT:    snez a1, a1
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v9
@@ -123,7 +123,7 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT:    vfirst.m a0, v0
-; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    snez a0, a0
 ; RV64-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.x.s a0, v0
@@ -153,7 +153,7 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
 ; RV64-NEXT:    vslidedown.vi v9, v0, 2
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT:    vfirst.m a1, v9
-; RV64-NEXT:    seqz a1, a1
+; RV64-NEXT:    snez a1, a1
 ; RV64-NEXT:    vslide1down.vx v8, v8, a1
 ; RV64-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
 ; RV64-NEXT:    vmv.x.s a1, v9

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index ab2882fe95f9..ecdfa3559523 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -9,13 +9,13 @@ define <32 x i1> @vector_interleave_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT:    vfirst.m a0, v0
-; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    snez a0, a0
 ; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; RV32-NEXT:    vslide1down.vx v10, v8, a0
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT:    vfirst.m a0, v8
-; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    snez a0, a0
 ; RV32-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; RV32-NEXT:    vslide1down.vx v10, v10, a0
 ; RV32-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
@@ -122,13 +122,13 @@ define <32 x i1> @vector_interleave_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT:    vfirst.m a0, v0
-; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    snez a0, a0
 ; RV64-NEXT:    li a2, 32
 ; RV64-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; RV64-NEXT:    vslide1down.vx v10, v8, a0
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT:    vfirst.m a0, v8
-; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    snez a0, a0
 ; RV64-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; RV64-NEXT:    vslide1down.vx v10, v10, a0
 ; RV64-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma


        

