[llvm] 5a5219a - [RISCV] Remove earlyclobber from compares with LMUL<=1.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 1 09:08:35 PDT 2021


Author: Craig Topper
Date: 2021-06-01T09:08:11-07:00
New Revision: 5a5219a0f961b91253dc3fdb4b33e088b199b451

URL: https://github.com/llvm/llvm-project/commit/5a5219a0f961b91253dc3fdb4b33e088b199b451
DIFF: https://github.com/llvm/llvm-project/commit/5a5219a0f961b91253dc3fdb4b33e088b199b451.diff

LOG: [RISCV] Remove earlyclobber from compares with LMUL<=1.

Compares are considered a narrowing operation for register overlap
constraints. I believe that for LMUL<=1 they meet the following
exception, which allows overlap:

"The destination EEW is smaller than the source EEW and the overlap is in the
lowest-numbered part of the source register group"

Both the result and the sources occupy a single register when LMUL<=1,
so the overlap is always in the "lowest-numbered part".
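
For orientation, here is a hedged sketch of how the new condition reads.
The class and def names below are invented for illustration; the actual
LMULInfo record in RISCVInstrInfoVPseudos.td is more involved. m.octuple
encodes LMUL scaled by eight so that fractional LMULs stay integral,
which is why !ge(m.octuple, 16) selects exactly LMUL>=2, the only cases
that keep the constraint:

  // Illustrative TableGen sketch, not the real LMULInfo definition.
  // octuple is LMUL scaled by 8 so fractional LMULs remain integers:
  //   MF8=1, MF4=2, MF2=4, M1=8, M2=16, M4=32, M8=64
  // For octuple <= 8 (LMUL <= 1) the mask result and the source each
  // fit in a single register, so any overlap is the permitted
  // lowest-numbered overlap; only LMUL >= 2 retains @earlyclobber.
  class CompareConstraint<int octuple> {
    string Constraint = !if(!ge(octuple, 16), "@earlyclobber $rd", "");
  }
  def M1Case : CompareConstraint<8>;  // LMUL=1 -> "" (no earlyclobber)
  def M2Case : CompareConstraint<16>; // LMUL=2 -> "@earlyclobber $rd"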

Reviewed By: frasercrmck, HsiangKai

Differential Revision: https://reviews.llvm.org/D103336

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
    llvm/test/CodeGen/RISCV/rvv/select-int.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e5103f585337c..c672113f739a6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1695,31 +1695,42 @@ multiclass PseudoUnaryV_VF8 {
   }
 }
 
-// The destination EEW is 1.
+// The destination EEW is 1 since "For the purposes of register group overlap
+// constraints, mask elements have EEW=1."
 // The source EEW is 8, 16, 32, or 64.
 // When the destination EEW is different from source EEW, we need to use
 // @earlyclobber to avoid the overlap between destination and source registers.
+// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
+// exception from the spec
+// "The destination EEW is smaller than the source EEW and the overlap is in the
+//  lowest-numbered part of the source register group".
+// With LMUL<=1 the source and dest occupy a single register so any overlap
+// is in the lowest-numbered part.
 multiclass VPseudoBinaryM_VV {
   foreach m = MxList.m in
-    defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m, "@earlyclobber $rd">;
+    defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
+                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryM_VX {
   foreach m = MxList.m in
     defm "_VX" :
-      VPseudoBinaryM<VR, m.vrclass, GPR, m, "@earlyclobber $rd">;
+      VPseudoBinaryM<VR, m.vrclass, GPR, m,
+                     !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryM_VF {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
       defm "_V" # f.FX :
-        VPseudoBinaryM<VR, m.vrclass, f.fprclass, m, "@earlyclobber $rd">;
+        VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
+                       !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryM_VI {
   foreach m = MxList.m in
-    defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m, "@earlyclobber $rd">;
+    defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
+                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {

diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
index bb7f4d0296c87..ee4006a5915b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -398,8 +398,8 @@ define void @truncstore_nxv1i8_nxv1i1(<vscale x 1 x i8> %x, <vscale x 1 x i1> *%
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v25, v8, 1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i8> %x to <vscale x 1 x i1>
   store <vscale x 1 x i1> %y, <vscale x 1 x i1>* %z

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index 769e12168f108..10c7defe98e04 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -306,8 +306,8 @@ define void @extract_v8i1_nxv2i1_2(<vscale x 2 x i1> %x, <8 x i1>* %y) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v25, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %x, i64 2)
   store <8 x i1> %c, <8 x i1>* %y

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
index d4b5382f66039..619ebb74b3e98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -8,9 +8,9 @@ define void @fcmp_oeq_vv_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vmfeq.vv v27, v25, v26
+; CHECK-NEXT:    vmfeq.vv v25, v25, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = load <8 x half>, <8 x half>* %y
@@ -25,9 +25,9 @@ define void @fcmp_oeq_vv_v8f16_nonans(<8 x half>* %x, <8 x half>* %y, <8 x i1>*
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vmfeq.vv v27, v25, v26
+; CHECK-NEXT:    vmfeq.vv v25, v25, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = load <8 x half>, <8 x half>* %y
@@ -478,10 +478,10 @@ define void @fcmp_ord_vv_v4f16(<4 x half>* %x, <4 x half>* %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
 ; CHECK-NEXT:    vle16.v v26, (a0)
-; CHECK-NEXT:    vmfeq.vv v27, v25, v25
-; CHECK-NEXT:    vmfeq.vv v25, v26, v26
+; CHECK-NEXT:    vmfeq.vv v25, v25, v25
+; CHECK-NEXT:    vmfeq.vv v26, v26, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmand.mm v0, v25, v27
+; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -505,10 +505,10 @@ define void @fcmp_uno_vv_v4f16(<2 x half>* %x, <2 x half>* %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
 ; CHECK-NEXT:    vle16.v v26, (a0)
-; CHECK-NEXT:    vmfne.vv v27, v25, v25
-; CHECK-NEXT:    vmfne.vv v25, v26, v26
+; CHECK-NEXT:    vmfne.vv v25, v25, v25
+; CHECK-NEXT:    vmfne.vv v26, v26, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
-; CHECK-NEXT:    vmor.mm v0, v25, v27
+; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -531,9 +531,9 @@ define void @fcmp_oeq_vf_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -548,9 +548,9 @@ define void @fcmp_oeq_vf_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -1002,10 +1002,10 @@ define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfeq.vf v27, v26, fa0
-; CHECK-NEXT:    vmfeq.vv v26, v25, v25
+; CHECK-NEXT:    vmfeq.vf v26, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmand.mm v0, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1030,10 +1030,10 @@ define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfne.vf v27, v26, fa0
-; CHECK-NEXT:    vmfne.vv v26, v25, v25
+; CHECK-NEXT:    vmfne.vf v26, v26, fa0
+; CHECK-NEXT:    vmfne.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
-; CHECK-NEXT:    vmor.mm v0, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1057,9 +1057,9 @@ define void @fcmp_oeq_fv_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -1074,9 +1074,9 @@ define void @fcmp_oeq_fv_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -1528,10 +1528,10 @@ define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfeq.vf v27, v26, fa0
-; CHECK-NEXT:    vmfeq.vv v26, v25, v25
+; CHECK-NEXT:    vmfeq.vf v26, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmand.mm v0, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1556,10 +1556,10 @@ define void @fcmp_uno_fv_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfne.vf v27, v26, fa0
-; CHECK-NEXT:    vmfne.vv v26, v25, v25
+; CHECK-NEXT:    vmfne.vf v26, v26, fa0
+; CHECK-NEXT:    vmfne.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
-; CHECK-NEXT:    vmor.mm v0, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index af6e727c4263c..7aa10c64130a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -310,7 +310,7 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v26, 0
 ; LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
-; LMULMAX1-NEXT:    vmv1r.v v29, v0
+; LMULMAX1-NEXT:    vmv1r.v v28, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
@@ -320,7 +320,7 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v27
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v27, 0
-; LMULMAX1-NEXT:    vmv1r.v v0, v29
+; LMULMAX1-NEXT:    vmv1r.v v0, v28
 ; LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
@@ -361,7 +361,7 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v26, 0
 ; LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
-; LMULMAX1-NEXT:    vmv1r.v v29, v0
+; LMULMAX1-NEXT:    vmv1r.v v28, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
@@ -371,7 +371,7 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v27
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v27, 0
-; LMULMAX1-NEXT:    vmv1r.v v0, v29
+; LMULMAX1-NEXT:    vmv1r.v v0, v28
 ; LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 2888df28db872..35bd3a677a4c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -424,8 +424,8 @@ define void @insert_v8i1_v4i1_0(<8 x i1>* %vp, <4 x i1>* %svp) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v25, v26, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %v = load <8 x i1>, <8 x i1>* %vp
   %sv = load <4 x i1>, <4 x i1>* %svp
@@ -451,8 +451,8 @@ define void @insert_v8i1_v4i1_4(<8 x i1>* %vp, <4 x i1>* %svp) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v25, v26, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %v = load <8 x i1>, <8 x i1>* %vp
   %sv = load <4 x i1>, <4 x i1>* %svp

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
index 492992126a5af..d60b2512cb0d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -85,8 +85,8 @@ define void @setge_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vmsle.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load <8 x i8>, <8 x i8>* %y
@@ -101,8 +101,8 @@ define void @setle_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vmsle.vv v27, v25, v26
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v25, v26
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = load <16 x i8>, <16 x i8>* %y
@@ -168,8 +168,8 @@ define void @setule_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vmsleu.vv v27, v25, v26
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsleu.vv v25, v25, v26
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load <8 x i8>, <8 x i8>* %y
@@ -183,8 +183,8 @@ define void @seteq_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmseq.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmseq.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -251,8 +251,8 @@ define void @setge_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    vmsle.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -267,8 +267,8 @@ define void @setle_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsle.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmsle.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -335,8 +335,8 @@ define void @setule_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsleu.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmsleu.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -351,8 +351,8 @@ define void @seteq_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmseq.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmseq.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -418,8 +418,8 @@ define void @setge_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsle.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmsle.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -435,8 +435,8 @@ define void @setle_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    vmsle.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -503,8 +503,8 @@ define void @setule_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    vmsleu.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsleu.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -519,8 +519,8 @@ define void @seteq_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmseq.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmseq.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 0, i32 0
@@ -586,8 +586,8 @@ define void @setge_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsgt.vi v26, v25, -1
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmsgt.vi v25, v25, -1
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 0, i32 0
@@ -602,8 +602,8 @@ define void @setle_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsle.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmsle.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 0, i32 0
@@ -670,8 +670,8 @@ define void @setule_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsleu.vi v26, v25, 5
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmsleu.vi v25, v25, 5
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 5, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index 28a5b06322c6a..aa7db3aef8755 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -149,8 +149,8 @@ define void @splat_v8i1(<8 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    andi a1, a1, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> undef, i1 %y, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> zeroinitializer
@@ -175,8 +175,8 @@ define void @splat_v16i1(<16 x i1>* %x, i1 %y) {
 ; CHECK-NEXT:    andi a1, a1, 1
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = insertelement <16 x i1> undef, i1 %y, i32 0
   %b = shufflevector <16 x i1> %a, <16 x i1> undef, <16 x i32> zeroinitializer
@@ -230,10 +230,10 @@ define void @splat_v32i1(<32 x i1>* %x, i1 %y) {
 ; LMULMAX1-RV32-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV32-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV32-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: splat_v32i1:
@@ -241,10 +241,10 @@ define void @splat_v32i1(<32 x i1>* %x, i1 %y) {
 ; LMULMAX1-RV64-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV64-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV64-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = insertelement <32 x i1> undef, i1 %y, i32 0
   %b = shufflevector <32 x i1> %a, <32 x i1> undef, <32 x i32> zeroinitializer
@@ -310,14 +310,14 @@ define void @splat_v64i1(<64 x i1>* %x, i1 %y) {
 ; LMULMAX1-RV32-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV32-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV32-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 6
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 4
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: splat_v64i1:
@@ -325,14 +325,14 @@ define void @splat_v64i1(<64 x i1>* %x, i1 %y) {
 ; LMULMAX1-RV64-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV64-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV64-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 6
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 4
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = insertelement <64 x i1> undef, i1 %y, i32 0
   %b = shufflevector <64 x i1> %a, <64 x i1> undef, <64 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
index 32a3bdc03b989..965f843f49a86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
@@ -14,10 +14,10 @@ define <1 x i1> @select_v1i1(i1 zeroext %c, <1 x i1> %a, <1 x i1> %b) {
 ; CHECK-NEXT:  .LBB0_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <1 x i1> %a, <1 x i1> %b
   ret <1 x i1> %v
@@ -35,10 +35,10 @@ define <1 x i1> @selectcc_v1i1(i1 signext %a, i1 signext %b, <1 x i1> %c, <1 x i
 ; CHECK-NEXT:  .LBB1_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <1 x i1> %c, <1 x i1> %d
@@ -55,10 +55,10 @@ define <2 x i1> @select_v2i1(i1 zeroext %c, <2 x i1> %a, <2 x i1> %b) {
 ; CHECK-NEXT:  .LBB2_2:
 ; CHECK-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <2 x i1> %a, <2 x i1> %b
   ret <2 x i1> %v
@@ -76,10 +76,10 @@ define <2 x i1> @selectcc_v2i1(i1 signext %a, i1 signext %b, <2 x i1> %c, <2 x i
 ; CHECK-NEXT:  .LBB3_2:
 ; CHECK-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <2 x i1> %c, <2 x i1> %d
@@ -96,10 +96,10 @@ define <4 x i1> @select_v4i1(i1 zeroext %c, <4 x i1> %a, <4 x i1> %b) {
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <4 x i1> %a, <4 x i1> %b
   ret <4 x i1> %v
@@ -117,10 +117,10 @@ define <4 x i1> @selectcc_v4i1(i1 signext %a, i1 signext %b, <4 x i1> %c, <4 x i
 ; CHECK-NEXT:  .LBB5_2:
 ; CHECK-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <4 x i1> %c, <4 x i1> %d
@@ -137,10 +137,10 @@ define <8 x i1> @select_v8i1(i1 zeroext %c, <8 x i1> %a, <8 x i1> %b) {
 ; CHECK-NEXT:  .LBB6_2:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <8 x i1> %a, <8 x i1> %b
   ret <8 x i1> %v
@@ -158,10 +158,10 @@ define <8 x i1> @selectcc_v8i1(i1 signext %a, i1 signext %b, <8 x i1> %c, <8 x i
 ; CHECK-NEXT:  .LBB7_2:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <8 x i1> %c, <8 x i1> %d
@@ -178,10 +178,10 @@ define <16 x i1> @select_v16i1(i1 zeroext %c, <16 x i1> %a, <16 x i1> %b) {
 ; CHECK-NEXT:  .LBB8_2:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <16 x i1> %a, <16 x i1> %b
   ret <16 x i1> %v
@@ -199,10 +199,10 @@ define <16 x i1> @selectcc_v16i1(i1 signext %a, i1 signext %b, <16 x i1> %c, <16
 ; CHECK-NEXT:  .LBB9_2:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <16 x i1> %c, <16 x i1> %d

diff --git a/llvm/test/CodeGen/RISCV/rvv/select-int.ll b/llvm/test/CodeGen/RISCV/rvv/select-int.ll
index 89c20e85d6faf..4cb9fcec7f0fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/select-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/select-int.ll
@@ -14,10 +14,10 @@ define <vscale x 1 x i1> @select_nxv1i1(i1 zeroext %c, <vscale x 1 x i1> %a, <vs
 ; CHECK-NEXT:  .LBB0_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b
   ret <vscale x 1 x i1> %v
@@ -35,10 +35,10 @@ define <vscale x 1 x i1> @selectcc_nxv1i1(i1 signext %a, i1 signext %b, <vscale
 ; CHECK-NEXT:  .LBB1_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 1 x i1> %c, <vscale x 1 x i1> %d
@@ -55,10 +55,10 @@ define <vscale x 2 x i1> @select_nxv2i1(i1 zeroext %c, <vscale x 2 x i1> %a, <vs
 ; CHECK-NEXT:  .LBB2_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 2 x i1> %a, <vscale x 2 x i1> %b
   ret <vscale x 2 x i1> %v
@@ -76,10 +76,10 @@ define <vscale x 2 x i1> @selectcc_nxv2i1(i1 signext %a, i1 signext %b, <vscale
 ; CHECK-NEXT:  .LBB3_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 2 x i1> %c, <vscale x 2 x i1> %d
@@ -96,10 +96,10 @@ define <vscale x 4 x i1> @select_nxv4i1(i1 zeroext %c, <vscale x 4 x i1> %a, <vs
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 4 x i1> %a, <vscale x 4 x i1> %b
   ret <vscale x 4 x i1> %v
@@ -117,10 +117,10 @@ define <vscale x 4 x i1> @selectcc_nxv4i1(i1 signext %a, i1 signext %b, <vscale
 ; CHECK-NEXT:  .LBB5_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 4 x i1> %c, <vscale x 4 x i1> %d
@@ -137,10 +137,10 @@ define <vscale x 8 x i1> @select_nxv8i1(i1 zeroext %c, <vscale x 8 x i1> %a, <vs
 ; CHECK-NEXT:  .LBB6_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 8 x i1> %a, <vscale x 8 x i1> %b
   ret <vscale x 8 x i1> %v
@@ -158,10 +158,10 @@ define <vscale x 8 x i1> @selectcc_nxv8i1(i1 signext %a, i1 signext %b, <vscale
 ; CHECK-NEXT:  .LBB7_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 8 x i1> %c, <vscale x 8 x i1> %d

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
index f31be2d424466..ae1a643cd4891 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -2761,11 +2761,9 @@ define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v26, (a0), zero
-; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v0, v25, v8, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
index a09fd13e6cdef..f1cf775cced90 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -2761,11 +2761,9 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v26, (a0), zero
-; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v0, v25, v8, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 entry:

