[llvm] b04c09c - [RISCV] Use V0 instead of VMV0 for mask vectors in isel patterns.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 8 09:48:09 PDT 2021


Author: Craig Topper
Date: 2021-09-08T09:46:21-07:00
New Revision: b04c09c07cfa073984b94dc1c8041201f43274d4

URL: https://github.com/llvm/llvm-project/commit/b04c09c07cfa073984b94dc1c8041201f43274d4
DIFF: https://github.com/llvm/llvm-project/commit/b04c09c07cfa073984b94dc1c8041201f43274d4.diff

LOG: [RISCV] Use V0 instead of VMV0 for mask vectors in isel patterns.

This is consistent with the RVV intrinsic patterns, and it has been
shown to prevent some "ran out of registers" errors in our internal
testing.

Unfortunately, there are some regressions in the LMUL=8 tests here.
I think the scarcity of free registers at LMUL=8 just makes it very
hard to schedule well.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D109245

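For a quick orientation before the full diff, here is a condensed
before/after sketch of one of the affected integer vselect patterns from
RISCVInstrInfoVSDPatterns.td; the same substitution (the VMV0:$vm and
vti.Mask:$vm operands replaced by a direct reference to V0) is applied
throughout both .td files:

    // Before: the mask is captured as a VMV0 register-class operand.
    def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1,
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                   vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
                   vti.AVL, vti.Log2SEW)>;

    // After: the mask is referenced through the physical register V0,
    // matching the RVV intrinsic patterns.
    def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                   vti.RegClass:$rs2)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
                   vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                   vti.AVL, vti.Log2SEW)>;

Since the VMV0 register class contains only V0, the selected
PseudoVMERGE instruction still takes its mask in v0 either way; the
change is in how the mask operand is represented during instruction
selection and register allocation.
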
Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
    llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 02d46a4dd89bb..555e087778ff7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -521,21 +521,21 @@ foreach vti = AllIntegerVectors in {
 
 // 12.15. Vector Integer Merge Instructions
 foreach vti = AllIntegerVectors in {
-  def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), vti.RegClass:$rs1,
+  def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                                       vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
+                 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                  vti.AVL, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
+  def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                                       vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
+                 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
+  def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                                       vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
+                 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
 }
 
 // 12.1. Vector Single-Width Saturating Add and Subtract
@@ -711,25 +711,25 @@ defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
 // 12.15. Vector Integer Merge Instructions
 // 14.15. Vector Floating-Point Merge Instruction
 foreach fvti = AllFloatVectors in {
-  def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1,
+  def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                                         fvti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
-                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
+                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                  fvti.AVL, fvti.Log2SEW)>;
 
-  def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                   (splat_vector fvti.ScalarRegClass:$rs1),
                                   fvti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2,
                  (fvti.Scalar fvti.ScalarRegClass:$rs1),
-                 VMV0:$vm, fvti.AVL, fvti.Log2SEW)>;
+                 (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
 
-  def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
+  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                   (splat_vector (fvti.Scalar fpimm0)),
                                   fvti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
-                 fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.Log2SEW)>;
+                 fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
 }
 
 // 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 7a4f133989f27..90869e60e2284 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -287,13 +287,13 @@ multiclass VPatBinaryVL_VV<SDNode vop,
   def : Pat<(result_type (vop
                          (op_type op_reg_class:$rs1),
                          (op_type op_reg_class:$rs2),
-                         (mask_type VMV0:$vm),
+                         (mask_type V0),
                          VLOpFrag)),
         (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX#"_MASK")
                      (result_type (IMPLICIT_DEF)),
                      op_reg_class:$rs1,
                      op_reg_class:$rs2,
-                     VMV0:$vm, GPR:$vl, sew)>;
+                     (mask_type V0), GPR:$vl, sew)>;
 }
 
 multiclass VPatBinaryVL_XI<SDNode vop,
@@ -320,13 +320,13 @@ multiclass VPatBinaryVL_XI<SDNode vop,
   def : Pat<(result_type (vop
                      (vop_type vop_reg_class:$rs1),
                      (vop_type (SplatPatKind (XLenVT xop_kind:$rs2))),
-                     (mask_type VMV0:$vm),
+                     (mask_type V0),
                      VLOpFrag)),
         (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK")
                      (result_type (IMPLICIT_DEF)),
                      vop_reg_class:$rs1,
                      xop_kind:$rs2,
-                     VMV0:$vm, GPR:$vl, sew)>;
+                     (mask_type V0), GPR:$vl, sew)>;
 }
 
 multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
@@ -641,22 +641,22 @@ foreach vti = AllIntegerVectors in {
             (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                  vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
   def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
-                          (vti.Vector vti.RegClass:$rs1), (vti.Mask VMV0:$vm),
+                          (vti.Vector vti.RegClass:$rs1), (vti.Mask V0),
                           VLOpFrag),
             (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
-                 VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
   def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                           (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
                           VLOpFrag),
             (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                  vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
   def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
-                          (vti.Vector vti.RegClass:$rs1), (vti.Mask VMV0:$vm),
+                          (vti.Vector vti.RegClass:$rs1), (vti.Mask V0),
                           VLOpFrag),
             (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2,
-                 VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 }
 
 // 12.3. Vector Integer Extension
@@ -882,27 +882,27 @@ foreach vtiTowti = AllWidenableIntVectors in {
 
 // 12.15. Vector Integer Merge Instructions
 foreach vti = AllIntegerVectors in {
-  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                           vti.RegClass:$rs1,
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
+                 vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
                  GPR:$vl, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                           (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
+                 vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                           (SplatPat_simm5 simm5:$rs1),
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
-                 vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
+                 vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 }
 
 // 12.16. Vector Integer Move Instructions
@@ -1126,29 +1126,29 @@ foreach fvti = AllFloatVectors in {
   // Floating-point vselects:
   // 12.15. Vector Integer Merge Instructions
   // 14.15. Vector Floating-Point Merge Instruction
-  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
+  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
             (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
-                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
+                 fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                  GPR:$vl, fvti.Log2SEW)>;
 
-  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
+  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
             (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                  fvti.RegClass:$rs2,
                  (fvti.Scalar fvti.ScalarRegClass:$rs1),
-                 VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
+                 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
 
-  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
+  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
             (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
-                 fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
+                 fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
 
   // 14.16. Vector Floating-Point Move Instruction
   // If we're splatting fpimm0, use vmv.v.x vd, x0.
@@ -1302,7 +1302,7 @@ foreach vti = AllIntegerVectors in {
             (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
                  vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                           (riscv_vrgather_vv_vl
                                             vti.RegClass:$rs2,
                                             vti.RegClass:$rs1,
@@ -1312,9 +1312,9 @@ foreach vti = AllIntegerVectors in {
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                  vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                           (riscv_vrgather_vx_vl
                                             vti.RegClass:$rs2,
                                             uimm5:$imm,
@@ -1324,7 +1324,7 @@ foreach vti = AllIntegerVectors in {
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                  vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
-                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 
   // emul = lmul * 16 / sew
   defvar vlmul = vti.LMul;
@@ -1341,7 +1341,7 @@ foreach vti = AllIntegerVectors in {
               (!cast<Instruction>(inst)
                    vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
 
-    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                             (riscv_vrgatherei16_vv_vl
                                               vti.RegClass:$rs2,
                                               (ivti.Vector ivti.RegClass:$rs1),
@@ -1351,7 +1351,7 @@ foreach vti = AllIntegerVectors in {
                                             VLOpFrag)),
               (!cast<Instruction>(inst#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-                   vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                   (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
   }
 }
 
@@ -1385,7 +1385,7 @@ foreach vti = AllFloatVectors in {
             (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
                  vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                           (riscv_vrgather_vv_vl
                                             vti.RegClass:$rs2,
                                             (ivti.Vector vti.RegClass:$rs1),
@@ -1395,9 +1395,9 @@ foreach vti = AllFloatVectors in {
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
                  vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
-                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 
-  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                           (riscv_vrgather_vx_vl
                                             vti.RegClass:$rs2,
                                             uimm5:$imm,
@@ -1407,7 +1407,7 @@ foreach vti = AllFloatVectors in {
                                           VLOpFrag)),
             (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                  vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
-                 vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 
   defvar vlmul = vti.LMul;
   defvar octuple_lmul = vlmul.octuple;
@@ -1423,7 +1423,7 @@ foreach vti = AllFloatVectors in {
               (!cast<Instruction>(inst)
                    vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
 
-    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+    def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                             (riscv_vrgatherei16_vv_vl
                                               vti.RegClass:$rs2,
                                               (ivti.Vector ivti.RegClass:$rs1),
@@ -1433,7 +1433,7 @@ foreach vti = AllFloatVectors in {
                                             VLOpFrag)),
               (!cast<Instruction>(inst#"_MASK")
                    vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
-                   vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
+                   (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
   }
 }
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 9646e6eada6fd..f3598ed1e3200 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -122,8 +122,8 @@ define <8 x i1> @fp2si_v8f32_v8i1(<8 x float> %x) {
 ; LMULMAX1-LABEL: fp2si_v8f32_v8i1:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmclr.m v0
+; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v27, v8
@@ -164,8 +164,8 @@ define <8 x i1> @fp2ui_v8f32_v8i1(<8 x float> %x) {
 ; LMULMAX1-LABEL: fp2ui_v8f32_v8i1:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmclr.m v0
+; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v27, v8
@@ -564,8 +564,8 @@ define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-LABEL: fp2si_v8f64_v8i1:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmclr.m v0
+; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.x.f.w v27, v8
@@ -628,8 +628,8 @@ define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
 ; LMULMAX1-LABEL: fp2ui_v8f64_v8i1:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmclr.m v0
+; LMULMAX1-NEXT:    vmv.v.i v25, 0
 ; LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX1-NEXT:    vfncvt.rtz.xu.f.w v27, v8

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index 451437d2fbd71..b408ec3fa9e47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -303,40 +303,40 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
 ;
 ; LMULMAX1-LABEL: si2fp_v8i1_v8f64:
 ; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    vmv1r.v v25, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v25, 0
-; LMULMAX1-NEXT:    vmerge.vim v26, v25, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v8, v26
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v26, 0
-; LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
-; LMULMAX1-NEXT:    vmv1r.v v28, v0
+; LMULMAX1-NEXT:    vmerge.vim v27, v26, -1, v0
+; LMULMAX1-NEXT:    vfcvt.f.x.v v8, v27
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; LMULMAX1-NEXT:    vmv.v.i v27, 0
+; LMULMAX1-NEXT:    vmerge.vim v28, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
+; LMULMAX1-NEXT:    vslidedown.vi v28, v28, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; LMULMAX1-NEXT:    vmsne.vi v0, v28, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v27, v25, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v27
+; LMULMAX1-NEXT:    vmerge.vim v28, v26, -1, v0
+; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v28
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v27, 0
-; LMULMAX1-NEXT:    vmv1r.v v0, v28
-; LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
+; LMULMAX1-NEXT:    vmv.v.i v28, 0
+; LMULMAX1-NEXT:    vmv1r.v v0, v25
+; LMULMAX1-NEXT:    vmerge.vim v25, v28, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
+; LMULMAX1-NEXT:    vslidedown.vi v25, v25, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v27, v25, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v10, v27
+; LMULMAX1-NEXT:    vmerge.vim v25, v26, -1, v0
+; LMULMAX1-NEXT:    vfcvt.f.x.v v10, v25
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; LMULMAX1-NEXT:    vmerge.vim v25, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vslidedown.vi v26, v26, 2
+; LMULMAX1-NEXT:    vslidedown.vi v25, v25, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v25, v25, -1, v0
+; LMULMAX1-NEXT:    vmerge.vim v25, v26, -1, v0
 ; LMULMAX1-NEXT:    vfcvt.f.x.v v11, v25
 ; LMULMAX1-NEXT:    ret
   %z = sitofp <8 x i1> %x to <8 x double>
@@ -354,40 +354,40 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
 ;
 ; LMULMAX1-LABEL: ui2fp_v8i1_v8f64:
 ; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    vmv1r.v v25, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v25, 0
-; LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v8, v26
-; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v26, 0
 ; LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
-; LMULMAX1-NEXT:    vmv1r.v v28, v0
+; LMULMAX1-NEXT:    vfcvt.f.xu.v v8, v27
+; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; LMULMAX1-NEXT:    vmv.v.i v27, 0
+; LMULMAX1-NEXT:    vmerge.vim v28, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
+; LMULMAX1-NEXT:    vslidedown.vi v28, v28, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; LMULMAX1-NEXT:    vmsne.vi v0, v28, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v27
+; LMULMAX1-NEXT:    vmerge.vim v28, v26, 1, v0
+; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v28
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vmv.v.i v27, 0
-; LMULMAX1-NEXT:    vmv1r.v v0, v28
-; LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
+; LMULMAX1-NEXT:    vmv.v.i v28, 0
+; LMULMAX1-NEXT:    vmv1r.v v0, v25
+; LMULMAX1-NEXT:    vmerge.vim v25, v28, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
-; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
+; LMULMAX1-NEXT:    vslidedown.vi v25, v25, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v10, v27
+; LMULMAX1-NEXT:    vmerge.vim v25, v26, 1, v0
+; LMULMAX1-NEXT:    vfcvt.f.xu.v v10, v25
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; LMULMAX1-NEXT:    vmerge.vim v25, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
-; LMULMAX1-NEXT:    vslidedown.vi v26, v26, 2
+; LMULMAX1-NEXT:    vslidedown.vi v25, v25, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; LMULMAX1-NEXT:    vmerge.vim v25, v26, 1, v0
 ; LMULMAX1-NEXT:    vfcvt.f.xu.v v11, v25
 ; LMULMAX1-NEXT:    ret
   %z = uitofp <8 x i1> %x to <8 x double>

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 7ca4ba21dab74..218d358c7f8b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -388,18 +388,18 @@ define void @insert_v8i1_v4i1_0(<8 x i1>* %vp, <4 x i1>* %svp) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT:    vle1.v v27, (a1)
+; CHECK-NEXT:    vle1.v v25, (a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv.v.i v25, 0
-; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.v.i v26, 0
-; CHECK-NEXT:    vmv1r.v v0, v27
 ; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v27, 0
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmerge.vim v25, v27, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v25, v26, 0
+; CHECK-NEXT:    vslideup.vi v26, v25, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmsne.vi v25, v26, 0
 ; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %v = load <8 x i1>, <8 x i1>* %vp
@@ -415,18 +415,18 @@ define void @insert_v8i1_v4i1_4(<8 x i1>* %vp, <4 x i1>* %svp) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT:    vle1.v v27, (a1)
+; CHECK-NEXT:    vle1.v v25, (a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv.v.i v25, 0
-; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.v.i v26, 0
-; CHECK-NEXT:    vmv1r.v v0, v27
 ; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v27, 0
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmerge.vim v25, v27, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v25, v26, 4
+; CHECK-NEXT:    vslideup.vi v26, v25, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmsne.vi v25, v26, 0
 ; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %v = load <8 x i1>, <8 x i1>* %vp
@@ -466,18 +466,18 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, <4 x i1>* %
 ; CHECK-LABEL: insert_nxv2i1_v4i1_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT:    vle1.v v27, (a0)
+; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv.v.i v25, 0
-; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.v.i v26, 0
-; CHECK-NEXT:    vmv1r.v v0, v27
 ; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v27, 0
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmerge.vim v25, v27, 1, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vi v25, v26, 0
+; CHECK-NEXT:    vslideup.vi v26, v25, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
 ; CHECK-NEXT:    ret
   %sv = load <4 x i1>, <4 x i1>* %svp
   %c = call <vscale x 2 x i1> @llvm.experimental.vector.insert.v4i1.nxv2i1(<vscale x 2 x i1> %v, <4 x i1> %sv, i64 0)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 39b6b704fa0ac..81435f224ab70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -4122,10 +4122,10 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-RV32-NEXT:    lui a1, 4
 ; LMULMAX2-RV32-NEXT:    addi a1, a1, 64
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
-; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a1
+; LMULMAX2-RV32-NEXT:    vmv.s.x v25, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; LMULMAX2-RV32-NEXT:    vmv1r.v v0, v25
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v28, v28, 1, v0
-; LMULMAX2-RV32-NEXT:    vmv1r.v v12, v0
 ; LMULMAX2-RV32-NEXT:    addi a1, zero, 257
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a1
@@ -4136,7 +4136,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-RV32-NEXT:    vle16.v v8, (a1)
 ; LMULMAX2-RV32-NEXT:    lui a1, 1048568
 ; LMULMAX2-RV32-NEXT:    vmerge.vxm v10, v30, a1, v0
-; LMULMAX2-RV32-NEXT:    vmv1r.v v0, v12
+; LMULMAX2-RV32-NEXT:    vmv1r.v v0, v25
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v30, v30, 1, v0
 ; LMULMAX2-RV32-NEXT:    vsrl.vv v30, v26, v30
 ; LMULMAX2-RV32-NEXT:    vmulhu.vv v30, v30, v8
@@ -4161,10 +4161,10 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-RV64-NEXT:    lui a1, 4
 ; LMULMAX2-RV64-NEXT:    addiw a1, a1, 64
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
-; LMULMAX2-RV64-NEXT:    vmv.s.x v0, a1
+; LMULMAX2-RV64-NEXT:    vmv.s.x v25, a1
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; LMULMAX2-RV64-NEXT:    vmv1r.v v0, v25
 ; LMULMAX2-RV64-NEXT:    vmerge.vim v28, v28, 1, v0
-; LMULMAX2-RV64-NEXT:    vmv1r.v v12, v0
 ; LMULMAX2-RV64-NEXT:    addi a1, zero, 257
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v0, a1
@@ -4175,7 +4175,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-RV64-NEXT:    vle16.v v8, (a1)
 ; LMULMAX2-RV64-NEXT:    lui a1, 1048568
 ; LMULMAX2-RV64-NEXT:    vmerge.vxm v10, v30, a1, v0
-; LMULMAX2-RV64-NEXT:    vmv1r.v v0, v12
+; LMULMAX2-RV64-NEXT:    vmv1r.v v0, v25
 ; LMULMAX2-RV64-NEXT:    vmerge.vim v30, v30, 1, v0
 ; LMULMAX2-RV64-NEXT:    vsrl.vv v30, v26, v30
 ; LMULMAX2-RV64-NEXT:    vmulhu.vv v30, v30, v8

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 05faa10eb5e1e..bb1104e4ac0a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -403,23 +403,23 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a2, zero, 128
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
-; CHECK-NEXT:    vle1.v v25, (a0)
-; CHECK-NEXT:    addi a0, a1, -128
-; CHECK-NEXT:    vmv1r.v v26, v0
-; CHECK-NEXT:    mv a3, zero
-; CHECK-NEXT:    bltu a1, a0, .LBB31_2
+; CHECK-NEXT:    vle1.v v26, (a0)
+; CHECK-NEXT:    addi a3, a1, -128
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    bltu a1, a3, .LBB31_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a3, a0
+; CHECK-NEXT:    mv a0, a3
 ; CHECK-NEXT:  .LBB31_2:
-; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v26
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    bltu a1, a2, .LBB31_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    addi a1, zero, 128
 ; CHECK-NEXT:  .LBB31_4:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> undef, i8 -1, i32 0
@@ -1524,9 +1524,9 @@ declare <32 x i64> @llvm.vp.add.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
 define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vadd_vx_v32i64:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    vmv1r.v v1, v0
 ; RV32-NEXT:    mv a1, zero
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
-; RV32-NEXT:    vmv1r.v v1, v0
 ; RV32-NEXT:    vslidedown.vi v0, v0, 2
 ; RV32-NEXT:    addi a2, zero, 32
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -1550,10 +1550,10 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ;
 ; RV64-LABEL: vadd_vx_v32i64:
 ; RV64:       # %bb.0:
+; RV64-NEXT:    vmv1r.v v25, v0
 ; RV64-NEXT:    mv a1, zero
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV64-NEXT:    addi a2, a0, -16
-; RV64-NEXT:    vmv1r.v v25, v0
 ; RV64-NEXT:    vslidedown.vi v0, v0, 2
 ; RV64-NEXT:    bltu a0, a2, .LBB107_2
 ; RV64-NEXT:  # %bb.1:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
index de3953e794754..087504cf84b2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -7,8 +7,8 @@ define void @vselect_vv_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle1.v v0, (a2)
+; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -24,8 +24,8 @@ define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %
 ; CHECK-LABEL: vselect_vx_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vle1.v v0, (a2)
+; CHECK-NEXT:    vle32.v v26, (a1)
 ; CHECK-NEXT:    vmerge.vxm v26, v26, a0, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -42,8 +42,8 @@ define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
 ; CHECK-LABEL: vselect_vi_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle1.v v0, (a1)
+; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmerge.vim v26, v26, -1, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -61,8 +61,8 @@ define void @vselect_vv_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %cc, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v26, (a0)
-; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vle1.v v0, (a2)
+; CHECK-NEXT:    vle32.v v28, (a1)
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse32.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -78,8 +78,8 @@ define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x flo
 ; CHECK-LABEL: vselect_vx_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle1.v v0, (a1)
+; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vfmerge.vfm v26, v26, fa0, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -96,8 +96,8 @@ define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>*
 ; CHECK-LABEL: vselect_vfpzero_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vle1.v v0, (a1)
+; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vmerge.vim v26, v26, 0, v0
 ; CHECK-NEXT:    vse32.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -115,8 +115,8 @@ define void @vselect_vv_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %cc, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v26, (a0)
-; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vle1.v v0, (a2)
+; CHECK-NEXT:    vle16.v v28, (a1)
 ; CHECK-NEXT:    vmerge.vvm v26, v28, v26, v0
 ; CHECK-NEXT:    vse16.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -132,8 +132,8 @@ define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <
 ; CHECK-LABEL: vselect_vx_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vle1.v v0, (a2)
+; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vmerge.vxm v26, v26, a0, v0
 ; CHECK-NEXT:    vse16.v v26, (a3)
 ; CHECK-NEXT:    ret
@@ -150,8 +150,8 @@ define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
 ; CHECK-LABEL: vselect_vi_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vle1.v v0, (a1)
+; CHECK-NEXT:    vle16.v v26, (a0)
 ; CHECK-NEXT:    vmerge.vim v26, v26, 4, v0
 ; CHECK-NEXT:    vse16.v v26, (a2)
 ; CHECK-NEXT:    ret
@@ -170,8 +170,8 @@ define void @vselect_vv_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %cc,
 ; CHECK-NEXT:    addi a4, zero, 32
 ; CHECK-NEXT:    vsetvli zero, a4, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v28, (a0)
-; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vle1.v v0, (a2)
+; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vmerge.vvm v28, v8, v28, v0
 ; CHECK-NEXT:    vse16.v v28, (a3)
 ; CHECK-NEXT:    ret
@@ -188,8 +188,8 @@ define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x h
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a3, zero, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
-; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vle1.v v0, (a1)
+; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vfmerge.vfm v28, v28, fa0, v0
 ; CHECK-NEXT:    vse16.v v28, (a2)
 ; CHECK-NEXT:    ret
@@ -207,8 +207,8 @@ define void @vselect_vfpzero_v32f16(<32 x half>* %b, <32 x i1>* %cc, <32 x half>
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a3, zero, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
-; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vle1.v v0, (a1)
+; CHECK-NEXT:    vle16.v v28, (a0)
 ; CHECK-NEXT:    vmerge.vim v28, v28, 0, v0
 ; CHECK-NEXT:    vse16.v v28, (a2)
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
index 7f98d13e45fcb..0dcf46c5b3fbb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
@@ -121,11 +121,10 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    sub sp, sp, a3
 ; RV64-1024-NEXT:    addi a3, zero, 256
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
-; RV64-1024-NEXT:    vle16.v v24, (a1)
+; RV64-1024-NEXT:    vle16.v v16, (a1)
 ; RV64-1024-NEXT:    vle16.v v8, (a2)
 ; RV64-1024-NEXT:    csrr a1, vlenb
-; RV64-1024-NEXT:    addi a2, zero, 24
-; RV64-1024-NEXT:    mul a1, a1, a2
+; RV64-1024-NEXT:    slli a1, a1, 4
 ; RV64-1024-NEXT:    add a1, sp, a1
 ; RV64-1024-NEXT:    addi a1, a1, 16
 ; RV64-1024-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
@@ -133,12 +132,12 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-1024-NEXT:    vmv.v.i v8, 0
 ; RV64-1024-NEXT:    csrr a2, vlenb
-; RV64-1024-NEXT:    slli a2, a2, 4
+; RV64-1024-NEXT:    slli a2, a2, 5
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
 ; RV64-1024-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m8, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v8, v24, 0
+; RV64-1024-NEXT:    vslideup.vi v8, v16, 0
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; RV64-1024-NEXT:    vmv.v.i v16, 0
 ; RV64-1024-NEXT:    addi a2, sp, 16
@@ -149,7 +148,8 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vid.v v24
 ; RV64-1024-NEXT:    vsrl.vi v16, v24, 1
 ; RV64-1024-NEXT:    csrr a2, vlenb
-; RV64-1024-NEXT:    slli a2, a2, 5
+; RV64-1024-NEXT:    addi a4, zero, 24
+; RV64-1024-NEXT:    mul a2, a2, a4
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
 ; RV64-1024-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -161,28 +161,32 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vs8r.v v0, (a2) # Unknown-size Folded Spill
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m8, tu, mu
 ; RV64-1024-NEXT:    csrr a2, vlenb
-; RV64-1024-NEXT:    slli a2, a2, 4
+; RV64-1024-NEXT:    slli a2, a2, 5
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
-; RV64-1024-NEXT:    vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
 ; RV64-1024-NEXT:    csrr a2, vlenb
-; RV64-1024-NEXT:    addi a4, zero, 24
-; RV64-1024-NEXT:    mul a2, a2, a4
+; RV64-1024-NEXT:    slli a2, a2, 4
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
-; RV64-1024-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
-; RV64-1024-NEXT:    vslideup.vi v16, v8, 0
+; RV64-1024-NEXT:    vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    vslideup.vi v8, v16, 0
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
 ; RV64-1024-NEXT:    addi a2, sp, 16
-; RV64-1024-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
-; RV64-1024-NEXT:    vslideup.vx v16, v8, a3
+; RV64-1024-NEXT:    vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    vslideup.vx v8, v16, a3
+; RV64-1024-NEXT:    csrr a2, vlenb
+; RV64-1024-NEXT:    slli a2, a2, 5
+; RV64-1024-NEXT:    add a2, sp, a2
+; RV64-1024-NEXT:    addi a2, a2, 16
+; RV64-1024-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; RV64-1024-NEXT:    csrr a2, vlenb
 ; RV64-1024-NEXT:    slli a2, a2, 3
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
-; RV64-1024-NEXT:    vl8re8.v v0, (a2) # Unknown-size Folded Reload
-; RV64-1024-NEXT:    vrgather.vv v8, v0, v24
+; RV64-1024-NEXT:    vl8re8.v v16, (a2) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    vrgather.vv v8, v16, v24
 ; RV64-1024-NEXT:    lui a2, 1026731
 ; RV64-1024-NEXT:    addiw a2, a2, -1365
 ; RV64-1024-NEXT:    slli a2, a2, 12
@@ -192,29 +196,35 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    slli a2, a2, 12
 ; RV64-1024-NEXT:    addi a2, a2, -1366
 ; RV64-1024-NEXT:    vsetivli zero, 8, e64, m1, ta, mu
-; RV64-1024-NEXT:    vmv.s.x v25, a2
+; RV64-1024-NEXT:    vmv.s.x v16, a2
 ; RV64-1024-NEXT:    vsetivli zero, 2, e64, m1, tu, mu
-; RV64-1024-NEXT:    vmv1r.v v0, v25
-; RV64-1024-NEXT:    vslideup.vi v0, v25, 1
+; RV64-1024-NEXT:    vmv1r.v v0, v16
+; RV64-1024-NEXT:    vslideup.vi v0, v16, 1
 ; RV64-1024-NEXT:    vsetivli zero, 3, e64, m1, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v0, v25, 2
+; RV64-1024-NEXT:    vslideup.vi v0, v16, 2
 ; RV64-1024-NEXT:    vsetivli zero, 4, e64, m1, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v0, v25, 3
+; RV64-1024-NEXT:    vslideup.vi v0, v16, 3
 ; RV64-1024-NEXT:    vsetivli zero, 5, e64, m1, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v0, v25, 4
+; RV64-1024-NEXT:    vslideup.vi v0, v16, 4
 ; RV64-1024-NEXT:    vsetivli zero, 6, e64, m1, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v0, v25, 5
+; RV64-1024-NEXT:    vslideup.vi v0, v16, 5
 ; RV64-1024-NEXT:    vsetivli zero, 7, e64, m1, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v0, v25, 6
+; RV64-1024-NEXT:    vslideup.vi v0, v16, 6
 ; RV64-1024-NEXT:    vsetivli zero, 8, e64, m1, tu, mu
-; RV64-1024-NEXT:    vslideup.vi v0, v25, 7
+; RV64-1024-NEXT:    vslideup.vi v0, v16, 7
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
 ; RV64-1024-NEXT:    csrr a1, vlenb
+; RV64-1024-NEXT:    addi a2, zero, 24
+; RV64-1024-NEXT:    mul a1, a1, a2
+; RV64-1024-NEXT:    add a1, sp, a1
+; RV64-1024-NEXT:    addi a1, a1, 16
+; RV64-1024-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
+; RV64-1024-NEXT:    csrr a1, vlenb
 ; RV64-1024-NEXT:    slli a1, a1, 5
 ; RV64-1024-NEXT:    add a1, sp, a1
 ; RV64-1024-NEXT:    addi a1, a1, 16
 ; RV64-1024-NEXT:    vl8re8.v v24, (a1) # Unknown-size Folded Reload
-; RV64-1024-NEXT:    vrgather.vv v8, v16, v24, v0.t
+; RV64-1024-NEXT:    vrgather.vv v8, v24, v16, v0.t
 ; RV64-1024-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; RV64-1024-NEXT:    vse16.v v8, (a0)
 ; RV64-1024-NEXT:    csrr a0, vlenb

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 5e566a7f8e261..effd4f694bde5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -1532,13 +1532,13 @@ declare <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32>, <vscale x
 define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vadd_vi_nxv32i32:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    mv a2, zero
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a4, a1, 2
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    slli a1, a1, 1
 ; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vslidedown.vx v0, v0, a4
 ; CHECK-NEXT:    bltu a0, a3, .LBB117_2
 ; CHECK-NEXT:  # %bb.1:
@@ -1573,7 +1573,6 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    slli a1, a1, 1
 ; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vmv1r.v v26, v25
 ; CHECK-NEXT:    vslidedown.vx v0, v25, a4
 ; CHECK-NEXT:    bltu a0, a3, .LBB118_2
 ; CHECK-NEXT:  # %bb.1:
@@ -1586,7 +1585,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:  .LBB118_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> undef, i32 -1, i32 0
@@ -1607,13 +1606,13 @@ declare i32 @llvm.vscale.i32()
 define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) {
 ; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    mv a2, zero
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a4, a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    slli a1, a0, 1
 ; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vslidedown.vx v0, v0, a4
 ; CHECK-NEXT:    bltu a0, a3, .LBB119_2
 ; CHECK-NEXT:  # %bb.1:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
index ec511dfb87b4d..7713e2569851b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
@@ -393,26 +393,36 @@ define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %v
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    sub sp, sp, a1
 ; CHECK-NEXT:    addi a1, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add a1, a0, a1
-; CHECK-NEXT:    vl8re64.v v24, (a1)
+; CHECK-NEXT:    vl8re64.v v8, (a1)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    vl8re64.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmseq.vi v1, v16, 0
+; CHECK-NEXT:    vmseq.vi v25, v16, 0
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    vl8re8.v v16, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vmseq.vi v0, v16, 0
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v1
-; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
@@ -426,16 +436,15 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
-; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmand.mm v1, v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a2, a0, 3
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v0, v25, a2
-; CHECK-NEXT:    vmv1r.v v2, v25
+; CHECK-NEXT:    vslidedown.vx v0, v1, a2
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv.v.i v24, 0
 ; CHECK-NEXT:    vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v2
+; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
 ; CHECK-NEXT:    vs8r.v v8, (a1)
 ; CHECK-NEXT:    slli a0, a0, 3

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
index df67335d554a3..fb9517afc04b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
@@ -393,26 +393,36 @@ define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %v
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    sub sp, sp, a1
 ; CHECK-NEXT:    addi a1, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add a1, a0, a1
-; CHECK-NEXT:    vl8re64.v v24, (a1)
+; CHECK-NEXT:    vl8re64.v v8, (a1)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    vl8re64.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vmseq.vi v1, v16, 0
+; CHECK-NEXT:    vmseq.vi v25, v16, 0
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    vl8re8.v v16, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vmseq.vi v0, v16, 0
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v1
-; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
@@ -426,16 +436,15 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vle1.v v25, (a0)
-; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmand.mm v1, v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a2, a0, 3
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v0, v25, a2
-; CHECK-NEXT:    vmv1r.v v2, v25
+; CHECK-NEXT:    vslidedown.vx v0, v1, a2
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv.v.i v24, 0
 ; CHECK-NEXT:    vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v2
+; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
 ; CHECK-NEXT:    vs8r.v v8, (a1)
 ; CHECK-NEXT:    slli a0, a0, 3


        

