[llvm] [RISCV] Reduce LMUL when index is known when lowering insert_vector_elt (PR #66087)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 13 07:48:14 PDT 2023


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/66087:

From 199e6df31bd90dbd00b84d8cda04469106a667d5 Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Wed, 13 Sep 2023 14:59:44 +0100
Subject: [PATCH 1/4] [TwoAddressInstruction] Update LiveIntervals after
 INSERT_SUBREG with undef read

Update LiveIntervals after rewriting:
  %reg = INSERT_SUBREG undef %reg, %subreg, subidx
to:
  undef %reg:subidx = COPY %subreg

D113044 implemented this for the non-undef case.
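
Once the INSERT_SUBREG has been rewritten into a subregister COPY, the lanes
not covered by subidx no longer have a def at this instruction, so the stale
value number in each untouched subrange is either merged into the reaching
value (the non-undef case handled by D113044) or, when the tied input was
undef, removed outright. A minimal sketch of that update follows; the helper
name, its parameter list and the includes are illustrative assumptions, not
part of the patch, which does this inline in runOnMachineFunction:

  #include "llvm/CodeGen/LiveIntervals.h"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/TargetRegisterInfo.h"
  using namespace llvm;

  // Fix up the subranges of LI after `undef %reg:subidx = COPY %subreg` has
  // replaced `%reg = INSERT_SUBREG undef %reg, %subreg, subidx`.
  static void updateSubRangesAfterRewrite(LiveIntervals &LIS, LiveInterval &LI,
                                          MachineInstr &CopyMI,
                                          const TargetRegisterInfo &TRI) {
    // Lanes actually written by the new COPY (the subidx lanes).
    LaneBitmask CopyLanes =
        TRI.getSubRegIndexLaneMask(CopyMI.getOperand(0).getSubReg());
    // The value defined by the rewritten instruction lives at its reg slot.
    SlotIndex Idx = LIS.getInstructionIndex(CopyMI).getRegSlot();

    for (LiveInterval::SubRange &S : LI.subranges()) {
      // Subranges written by the COPY keep their def and need no fixup.
      if ((S.LaneMask & CopyLanes).any())
        continue;
      // The old INSERT_SUBREG both read and redefined these lanes; after the
      // rewrite there is no def of them at this point any more.
      LiveRange::iterator DefSeg = S.FindSegmentContaining(Idx);
      if (CopyMI.getOperand(0).isUndef()) {
        // Undef tied read: there is no incoming value to merge into, so the
        // now-stale value number is simply deleted.
        S.removeValNo(DefSeg->valno);
      } else {
        // Non-undef case: fold the def's value number into the value that
        // reaches the instruction.
        LiveRange::iterator UseSeg = std::prev(DefSeg);
        S.MergeValueNumberInto(DefSeg->valno, UseSeg->valno);
      }
    }
  }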
---
 .../lib/CodeGen/TwoAddressInstructionPass.cpp |   12 +-
 llvm/test/CodeGen/Thumb2/mve-fmath.ll         |   52 +-
 llvm/test/CodeGen/Thumb2/mve-shuffle.ll       |  189 ++-
 llvm/test/CodeGen/Thumb2/mve-vld3.ll          | 1269 +++++++++++------
 4 files changed, 1019 insertions(+), 503 deletions(-)

diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index 45f61262faf9391..41b888592cc18b6 100644
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1868,12 +1868,16 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
             // %reg.subidx.
             LaneBitmask LaneMask =
                 TRI->getSubRegIndexLaneMask(mi->getOperand(0).getSubReg());
-            SlotIndex Idx = LIS->getInstructionIndex(*mi);
+            SlotIndex Idx = LIS->getInstructionIndex(*mi).getRegSlot();
             for (auto &S : LI.subranges()) {
               if ((S.LaneMask & LaneMask).none()) {
-                LiveRange::iterator UseSeg = S.FindSegmentContaining(Idx);
-                LiveRange::iterator DefSeg = std::next(UseSeg);
-                S.MergeValueNumberInto(DefSeg->valno, UseSeg->valno);
+                LiveRange::iterator DefSeg = S.FindSegmentContaining(Idx);
+                if (mi->getOperand(0).isUndef()) {
+                  S.removeValNo(DefSeg->valno);
+                } else {
+                  LiveRange::iterator UseSeg = std::prev(DefSeg);
+                  S.MergeValueNumberInto(DefSeg->valno, UseSeg->valno);
+                }
               }
             }
 
diff --git a/llvm/test/CodeGen/Thumb2/mve-fmath.ll b/llvm/test/CodeGen/Thumb2/mve-fmath.ll
index 844da3baa42ba8d..3a477f987cee61f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fmath.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fmath.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS
 
 define arm_aapcs_vfpcc <4 x float> @sqrt_float32_t(<4 x float> %src) {
 ; CHECK-LABEL: sqrt_float32_t:
@@ -1085,21 +1087,37 @@ entry:
 }
 
 define arm_aapcs_vfpcc <2 x double> @copysign_float64_t(<2 x double> %src1, <2 x double> %src2) {
-; CHECK-LABEL: copysign_float64_t:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r7, lr}
-; CHECK-NEXT:    push {r7, lr}
-; CHECK-NEXT:    vmov r0, r1, d3
-; CHECK-NEXT:    vmov r0, lr, d2
-; CHECK-NEXT:    vmov r0, r3, d1
-; CHECK-NEXT:    vmov r12, r2, d0
-; CHECK-NEXT:    lsrs r1, r1, #31
-; CHECK-NEXT:    bfi r3, r1, #31, #1
-; CHECK-NEXT:    lsr.w r1, lr, #31
-; CHECK-NEXT:    bfi r2, r1, #31, #1
-; CHECK-NEXT:    vmov d1, r0, r3
-; CHECK-NEXT:    vmov d0, r12, r2
-; CHECK-NEXT:    pop {r7, pc}
+; CHECK-LV-LABEL: copysign_float64_t:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r7, lr}
+; CHECK-LV-NEXT:    push {r7, lr}
+; CHECK-LV-NEXT:    vmov r0, r1, d3
+; CHECK-LV-NEXT:    vmov r0, lr, d2
+; CHECK-LV-NEXT:    vmov r0, r3, d1
+; CHECK-LV-NEXT:    vmov r12, r2, d0
+; CHECK-LV-NEXT:    lsrs r1, r1, #31
+; CHECK-LV-NEXT:    bfi r3, r1, #31, #1
+; CHECK-LV-NEXT:    lsr.w r1, lr, #31
+; CHECK-LV-NEXT:    bfi r2, r1, #31, #1
+; CHECK-LV-NEXT:    vmov d1, r0, r3
+; CHECK-LV-NEXT:    vmov d0, r12, r2
+; CHECK-LV-NEXT:    pop {r7, pc}
+;
+; CHECK-LIS-LABEL: copysign_float64_t:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, lr}
+; CHECK-LIS-NEXT:    push {r4, lr}
+; CHECK-LIS-NEXT:    vmov r0, r12, d3
+; CHECK-LIS-NEXT:    vmov r0, lr, d2
+; CHECK-LIS-NEXT:    vmov r4, r3, d1
+; CHECK-LIS-NEXT:    vmov r1, r2, d0
+; CHECK-LIS-NEXT:    lsr.w r0, r12, #31
+; CHECK-LIS-NEXT:    bfi r3, r0, #31, #1
+; CHECK-LIS-NEXT:    lsr.w r0, lr, #31
+; CHECK-LIS-NEXT:    bfi r2, r0, #31, #1
+; CHECK-LIS-NEXT:    vmov d1, r4, r3
+; CHECK-LIS-NEXT:    vmov d0, r1, r2
+; CHECK-LIS-NEXT:    pop {r4, pc}
 entry:
   %0 = call fast <2 x double> @llvm.copysign.v2f64(<2 x double> %src1, <2 x double> %src2)
   ret <2 x double> %0
diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index 93a058828765e2b..6e644c58687faea 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECKFP
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV,CHECKFP
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS,CHECKFP
 
 define arm_aapcs_vfpcc <4 x i32> @shuffle1_i32(<4 x i32> %src) {
 ; CHECK-LABEL: shuffle1_i32:
@@ -221,18 +223,31 @@ entry:
 }
 
 define arm_aapcs_vfpcc <8 x i16> @shuffle3_i16(<8 x i16> %src) {
-; CHECK-LABEL: shuffle3_i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vmovx.f16 s2, s5
-; CHECK-NEXT:    vmovx.f16 s0, s4
-; CHECK-NEXT:    vins.f16 s5, s4
-; CHECK-NEXT:    vins.f16 s2, s0
-; CHECK-NEXT:    vmov.f32 s3, s5
-; CHECK-NEXT:    vmovx.f16 s1, s7
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vins.f16 s1, s7
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: shuffle3_i16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    vmov q1, q0
+; CHECK-LV-NEXT:    vmovx.f16 s2, s5
+; CHECK-LV-NEXT:    vmovx.f16 s0, s4
+; CHECK-LV-NEXT:    vins.f16 s5, s4
+; CHECK-LV-NEXT:    vins.f16 s2, s0
+; CHECK-LV-NEXT:    vmov.f32 s3, s5
+; CHECK-LV-NEXT:    vmovx.f16 s1, s7
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vins.f16 s1, s7
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: shuffle3_i16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    vmovx.f16 s5, s3
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s1
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s0
+; CHECK-LIS-NEXT:    vins.f16 s1, s0
+; CHECK-LIS-NEXT:    vins.f16 s6, s4
+; CHECK-LIS-NEXT:    vins.f16 s5, s3
+; CHECK-LIS-NEXT:    vmov.f32 s7, s1
+; CHECK-LIS-NEXT:    vmov.f32 s4, s2
+; CHECK-LIS-NEXT:    vmov q0, q1
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 7, i32 6, i32 3, i32 1, i32 2, i32 0>
   ret <8 x i16> %out
@@ -476,42 +491,79 @@ entry:
 }
 
 define arm_aapcs_vfpcc <16 x i8> @shuffle3_i8(<16 x i8> %src) {
-; CHECK-LABEL: shuffle3_i8:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vmov.u8 r0, q0[4]
-; CHECK-NEXT:    vmov.8 q0[0], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[5]
-; CHECK-NEXT:    vmov.8 q0[1], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[15]
-; CHECK-NEXT:    vmov.8 q0[2], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[7]
-; CHECK-NEXT:    vmov.8 q0[3], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[14]
-; CHECK-NEXT:    vmov.8 q0[4], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[9]
-; CHECK-NEXT:    vmov.8 q0[5], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[6]
-; CHECK-NEXT:    vmov.8 q0[6], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[3]
-; CHECK-NEXT:    vmov.8 q0[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[10]
-; CHECK-NEXT:    vmov.8 q0[8], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[12]
-; CHECK-NEXT:    vmov.8 q0[9], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[1]
-; CHECK-NEXT:    vmov.8 q0[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[13]
-; CHECK-NEXT:    vmov.8 q0[11], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[2]
-; CHECK-NEXT:    vmov.8 q0[12], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[8]
-; CHECK-NEXT:    vmov.8 q0[13], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[0]
-; CHECK-NEXT:    vmov.8 q0[14], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[11]
-; CHECK-NEXT:    vmov.8 q0[15], r0
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: shuffle3_i8:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    vmov q1, q0
+; CHECK-LV-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-LV-NEXT:    vmov.8 q0[0], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-LV-NEXT:    vmov.8 q0[1], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-LV-NEXT:    vmov.8 q0[2], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-LV-NEXT:    vmov.8 q0[3], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-LV-NEXT:    vmov.8 q0[4], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-LV-NEXT:    vmov.8 q0[5], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-LV-NEXT:    vmov.8 q0[6], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-LV-NEXT:    vmov.8 q0[7], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-LV-NEXT:    vmov.8 q0[8], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-LV-NEXT:    vmov.8 q0[9], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-LV-NEXT:    vmov.8 q0[10], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-LV-NEXT:    vmov.8 q0[11], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-LV-NEXT:    vmov.8 q0[12], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-LV-NEXT:    vmov.8 q0[13], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-LV-NEXT:    vmov.8 q0[14], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-LV-NEXT:    vmov.8 q0[15], r0
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: shuffle3_i8:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-LIS-NEXT:    vmov.8 q1[0], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-LIS-NEXT:    vmov.8 q1[1], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-LIS-NEXT:    vmov.8 q1[2], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-LIS-NEXT:    vmov.8 q1[3], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-LIS-NEXT:    vmov.8 q1[4], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-LIS-NEXT:    vmov.8 q1[5], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-LIS-NEXT:    vmov.8 q1[6], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-LIS-NEXT:    vmov.8 q1[7], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-LIS-NEXT:    vmov.8 q1[8], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-LIS-NEXT:    vmov.8 q1[9], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-LIS-NEXT:    vmov.8 q1[10], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-LIS-NEXT:    vmov.8 q1[11], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-LIS-NEXT:    vmov.8 q1[12], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-LIS-NEXT:    vmov.8 q1[13], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-LIS-NEXT:    vmov.8 q1[14], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-LIS-NEXT:    vmov.8 q1[15], r0
+; CHECK-LIS-NEXT:    vmov q0, q1
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 15, i32 7, i32 14, i32 9, i32 6, i32 3, i32 10, i32 12, i32 1, i32 13, i32 2, i32 8, i32 0, i32 11>
   ret <16 x i8> %out
@@ -1143,18 +1195,31 @@ entry:
 }
 
 define arm_aapcs_vfpcc <8 x half> @shuffle3_f16(<8 x half> %src) {
-; CHECK-LABEL: shuffle3_f16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vmovx.f16 s2, s5
-; CHECK-NEXT:    vmovx.f16 s0, s4
-; CHECK-NEXT:    vins.f16 s5, s4
-; CHECK-NEXT:    vins.f16 s2, s0
-; CHECK-NEXT:    vmov.f32 s3, s5
-; CHECK-NEXT:    vmovx.f16 s1, s7
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vins.f16 s1, s7
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: shuffle3_f16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    vmov q1, q0
+; CHECK-LV-NEXT:    vmovx.f16 s2, s5
+; CHECK-LV-NEXT:    vmovx.f16 s0, s4
+; CHECK-LV-NEXT:    vins.f16 s5, s4
+; CHECK-LV-NEXT:    vins.f16 s2, s0
+; CHECK-LV-NEXT:    vmov.f32 s3, s5
+; CHECK-LV-NEXT:    vmovx.f16 s1, s7
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vins.f16 s1, s7
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: shuffle3_f16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    vmovx.f16 s5, s3
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s1
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s0
+; CHECK-LIS-NEXT:    vins.f16 s1, s0
+; CHECK-LIS-NEXT:    vins.f16 s6, s4
+; CHECK-LIS-NEXT:    vins.f16 s5, s3
+; CHECK-LIS-NEXT:    vmov.f32 s7, s1
+; CHECK-LIS-NEXT:    vmov.f32 s4, s2
+; CHECK-LIS-NEXT:    vmov q0, q1
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 4, i32 5, i32 7, i32 6, i32 3, i32 1, i32 2, i32 0>
   ret <8 x half> %out
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 4895eabb71ec060..8a94e571e983665 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,CHECK-LV
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,CHECK-LIS
 
 ; i32
 
@@ -67,46 +68,87 @@ entry:
 }
 
 define void @vld3_v8i32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v8i32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.i32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.i32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vadd.i32 q1, q4, q1
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v8i32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vadd.i32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vadd.i32 q1, q4, q1
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v8i32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.i32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vadd.i32 q1, q3, q1
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <24 x i32>, ptr %src, align 4
   %s1 = shufflevector <24 x i32> %l1, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
@@ -119,80 +161,155 @@ entry:
 }
 
 define void @vld3_v16i32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v16i32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #176]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.i32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.i32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #160]
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
-; CHECK-NEXT:    vadd.i32 q1, q4, q1
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vmov.f32 s21, s8
-; CHECK-NEXT:    vmov.f32 s22, s11
-; CHECK-NEXT:    vmov.f32 s16, s12
-; CHECK-NEXT:    vmov.f32 s17, s15
-; CHECK-NEXT:    vmov.f32 s20, s13
-; CHECK-NEXT:    vmov.f32 s23, s26
-; CHECK-NEXT:    vmov.f32 s19, s25
-; CHECK-NEXT:    vadd.i32 q4, q4, q5
-; CHECK-NEXT:    vmov.f32 s8, s14
-; CHECK-NEXT:    vmov.f32 s10, s24
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #112]
-; CHECK-NEXT:    vmov.f32 s11, s27
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #128]
-; CHECK-NEXT:    vadd.i32 q2, q4, q2
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #96]
-; CHECK-NEXT:    vmov.f32 s25, s12
-; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s26, s15
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s30, s14
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vmov.f32 s24, s17
-; CHECK-NEXT:    vmov.f32 s27, s22
-; CHECK-NEXT:    vmov.f32 s28, s16
-; CHECK-NEXT:    vmov.f32 s29, s19
-; CHECK-NEXT:    vmov.f32 s31, s21
-; CHECK-NEXT:    vadd.i32 q6, q7, q6
-; CHECK-NEXT:    vmov.f32 s12, s18
-; CHECK-NEXT:    vmov.f32 s14, s20
-; CHECK-NEXT:    vmov.f32 s15, s23
-; CHECK-NEXT:    vadd.i32 q3, q6, q3
-; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v16i32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vadd.i32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LV-NEXT:    vadd.i32 q1, q4, q1
+; CHECK-LV-NEXT:    vmov.f32 s18, s10
+; CHECK-LV-NEXT:    vmov.f32 s21, s8
+; CHECK-LV-NEXT:    vmov.f32 s22, s11
+; CHECK-LV-NEXT:    vmov.f32 s16, s12
+; CHECK-LV-NEXT:    vmov.f32 s17, s15
+; CHECK-LV-NEXT:    vmov.f32 s20, s13
+; CHECK-LV-NEXT:    vmov.f32 s23, s26
+; CHECK-LV-NEXT:    vmov.f32 s19, s25
+; CHECK-LV-NEXT:    vadd.i32 q4, q4, q5
+; CHECK-LV-NEXT:    vmov.f32 s8, s14
+; CHECK-LV-NEXT:    vmov.f32 s10, s24
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LV-NEXT:    vmov.f32 s11, s27
+; CHECK-LV-NEXT:    vldrw.u32 q5, [r0, #128]
+; CHECK-LV-NEXT:    vadd.i32 q2, q4, q2
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LV-NEXT:    vmov.f32 s25, s12
+; CHECK-LV-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LV-NEXT:    vmov.f32 s26, s15
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s30, s14
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vmov.f32 s24, s17
+; CHECK-LV-NEXT:    vmov.f32 s27, s22
+; CHECK-LV-NEXT:    vmov.f32 s28, s16
+; CHECK-LV-NEXT:    vmov.f32 s29, s19
+; CHECK-LV-NEXT:    vmov.f32 s31, s21
+; CHECK-LV-NEXT:    vadd.i32 q6, q7, q6
+; CHECK-LV-NEXT:    vmov.f32 s12, s18
+; CHECK-LV-NEXT:    vmov.f32 s14, s20
+; CHECK-LV-NEXT:    vmov.f32 s15, s23
+; CHECK-LV-NEXT:    vadd.i32 q3, q6, q3
+; CHECK-LV-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v16i32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.i32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vadd.i32 q1, q3, q1
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LIS-NEXT:    vmov.f32 s18, s10
+; CHECK-LIS-NEXT:    vmov.f32 s21, s8
+; CHECK-LIS-NEXT:    vmov.f32 s22, s11
+; CHECK-LIS-NEXT:    vmov.f32 s16, s12
+; CHECK-LIS-NEXT:    vmov.f32 s17, s15
+; CHECK-LIS-NEXT:    vmov.f32 s20, s13
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s19, s25
+; CHECK-LIS-NEXT:    vmov.f32 s8, s14
+; CHECK-LIS-NEXT:    vadd.i32 q4, q4, q5
+; CHECK-LIS-NEXT:    vmov.f32 s10, s24
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LIS-NEXT:    vmov.f32 s11, s27
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #128]
+; CHECK-LIS-NEXT:    vadd.i32 q2, q4, q2
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LIS-NEXT:    vmov.f32 s21, s12
+; CHECK-LIS-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LIS-NEXT:    vmov.f32 s22, s15
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s30, s14
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vmov.f32 s20, s17
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s28, s16
+; CHECK-LIS-NEXT:    vmov.f32 s29, s19
+; CHECK-LIS-NEXT:    vmov.f32 s31, s25
+; CHECK-LIS-NEXT:    vadd.i32 q5, q7, q5
+; CHECK-LIS-NEXT:    vmov.f32 s12, s18
+; CHECK-LIS-NEXT:    vmov.f32 s14, s24
+; CHECK-LIS-NEXT:    vmov.f32 s15, s27
+; CHECK-LIS-NEXT:    vadd.i32 q3, q5, q3
+; CHECK-LIS-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <48 x i32>, ptr %src, align 4
   %s1 = shufflevector <48 x i32> %l1, <48 x i32> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
@@ -247,32 +364,59 @@ entry:
 }
 
 define void @vld3_v4i16(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v4i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrh.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.u16 r5, q0[6]
-; CHECK-NEXT:    vmov.u16 r6, q0[0]
-; CHECK-NEXT:    vmov r0, r3, d2
-; CHECK-NEXT:    vmov.u16 lr, q0[2]
-; CHECK-NEXT:    vmov r2, r4, d3
-; CHECK-NEXT:    vmov q1[2], q1[0], r6, r5
-; CHECK-NEXT:    vmov.u16 r5, q0[7]
-; CHECK-NEXT:    vmov.u16 r6, q0[1]
-; CHECK-NEXT:    vmov q2[2], q2[0], r6, r5
-; CHECK-NEXT:    vmov.u16 r5, q0[3]
-; CHECK-NEXT:    vmov.u16 r6, q0[4]
-; CHECK-NEXT:    vmov q1[3], q1[1], r5, r3
-; CHECK-NEXT:    vmov q2[3], q2[1], r6, r2
-; CHECK-NEXT:    vmov.u16 r12, q0[5]
-; CHECK-NEXT:    vadd.i32 q0, q1, q2
-; CHECK-NEXT:    vmov q1[2], q1[0], lr, r0
-; CHECK-NEXT:    vmov q1[3], q1[1], r12, r4
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vstrh.32 q0, [r1]
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-LV-LABEL: vld3_v4i16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-LV-NEXT:    push {r4, r5, r6, lr}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LV-NEXT:    vldrh.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.u16 r5, q0[6]
+; CHECK-LV-NEXT:    vmov.u16 r6, q0[0]
+; CHECK-LV-NEXT:    vmov r0, r3, d2
+; CHECK-LV-NEXT:    vmov.u16 lr, q0[2]
+; CHECK-LV-NEXT:    vmov r2, r4, d3
+; CHECK-LV-NEXT:    vmov q1[2], q1[0], r6, r5
+; CHECK-LV-NEXT:    vmov.u16 r5, q0[7]
+; CHECK-LV-NEXT:    vmov.u16 r6, q0[1]
+; CHECK-LV-NEXT:    vmov q2[2], q2[0], r6, r5
+; CHECK-LV-NEXT:    vmov.u16 r5, q0[3]
+; CHECK-LV-NEXT:    vmov.u16 r6, q0[4]
+; CHECK-LV-NEXT:    vmov q1[3], q1[1], r5, r3
+; CHECK-LV-NEXT:    vmov q2[3], q2[1], r6, r2
+; CHECK-LV-NEXT:    vmov.u16 r12, q0[5]
+; CHECK-LV-NEXT:    vadd.i32 q0, q1, q2
+; CHECK-LV-NEXT:    vmov q1[2], q1[0], lr, r0
+; CHECK-LV-NEXT:    vmov q1[3], q1[1], r12, r4
+; CHECK-LV-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-LV-NEXT:    vstrh.32 q0, [r1]
+; CHECK-LV-NEXT:    pop {r4, r5, r6, pc}
+;
+; CHECK-LIS-LABEL: vld3_v4i16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-LIS-NEXT:    push {r4, r5, r6, lr}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LIS-NEXT:    vldrh.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.u16 r5, q0[6]
+; CHECK-LIS-NEXT:    vmov.u16 r6, q0[0]
+; CHECK-LIS-NEXT:    vmov r0, r2, d2
+; CHECK-LIS-NEXT:    vmov.u16 r12, q0[2]
+; CHECK-LIS-NEXT:    vmov r3, r4, d3
+; CHECK-LIS-NEXT:    vmov q1[2], q1[0], r6, r5
+; CHECK-LIS-NEXT:    vmov.u16 r5, q0[7]
+; CHECK-LIS-NEXT:    vmov.u16 r6, q0[1]
+; CHECK-LIS-NEXT:    vmov q2[2], q2[0], r6, r5
+; CHECK-LIS-NEXT:    vmov.u16 r5, q0[3]
+; CHECK-LIS-NEXT:    vmov.u16 r6, q0[4]
+; CHECK-LIS-NEXT:    vmov q1[3], q1[1], r5, r2
+; CHECK-LIS-NEXT:    vmov q2[3], q2[1], r6, r3
+; CHECK-LIS-NEXT:    vmov.u16 lr, q0[5]
+; CHECK-LIS-NEXT:    vadd.i32 q0, q1, q2
+; CHECK-LIS-NEXT:    vmov q1[2], q1[0], r12, r0
+; CHECK-LIS-NEXT:    vmov q1[3], q1[1], lr, r4
+; CHECK-LIS-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-LIS-NEXT:    vstrh.32 q0, [r1]
+; CHECK-LIS-NEXT:    pop {r4, r5, r6, pc}
 entry:
   %l1 = load <12 x i16>, ptr %src, align 4
   %s1 = shufflevector <12 x i16> %l1, <12 x i16> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
@@ -340,86 +484,167 @@ entry:
 }
 
 define void @vld3_v16i16(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v16i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmovx.f16 s6, s2
-; CHECK-NEXT:    vmov.f32 s4, s1
-; CHECK-NEXT:    vins.f16 s4, s6
-; CHECK-NEXT:    vmovx.f16 s6, s9
-; CHECK-NEXT:    vmov.f32 s5, s8
-; CHECK-NEXT:    vmovx.f16 s7, s12
-; CHECK-NEXT:    vins.f16 s5, s6
-; CHECK-NEXT:    vmov.f32 s6, s11
-; CHECK-NEXT:    vins.f16 s6, s7
-; CHECK-NEXT:    vmovx.f16 s16, s15
-; CHECK-NEXT:    vmov.f32 s7, s14
-; CHECK-NEXT:    vmovx.f16 s17, s3
-; CHECK-NEXT:    vins.f16 s7, s16
-; CHECK-NEXT:    vmovx.f16 s16, s0
-; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s1
-; CHECK-NEXT:    vins.f16 s0, s2
-; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s2, s11
-; CHECK-NEXT:    vmovx.f16 s8, s14
-; CHECK-NEXT:    vmovx.f16 s18, s10
-; CHECK-NEXT:    vmovx.f16 s19, s13
-; CHECK-NEXT:    vins.f16 s10, s2
-; CHECK-NEXT:    vins.f16 s13, s8
-; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s18, s12
-; CHECK-NEXT:    vins.f16 s19, s15
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
-; CHECK-NEXT:    vins.f16 s17, s9
-; CHECK-NEXT:    vmov.f32 s2, s10
-; CHECK-NEXT:    vadd.i16 q0, q0, q4
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vmovx.f16 s6, s14
-; CHECK-NEXT:    vldrw.u32 q4, [r0]
-; CHECK-NEXT:    vins.f16 s6, s8
-; CHECK-NEXT:    vmov.f32 s22, s15
-; CHECK-NEXT:    vmovx.f16 s8, s8
-; CHECK-NEXT:    vins.f16 s22, s8
-; CHECK-NEXT:    vmovx.f16 s8, s11
-; CHECK-NEXT:    vmov.f32 s23, s10
-; CHECK-NEXT:    vmovx.f16 s4, s16
-; CHECK-NEXT:    vins.f16 s23, s8
-; CHECK-NEXT:    vmovx.f16 s8, s17
-; CHECK-NEXT:    vins.f16 s16, s8
-; CHECK-NEXT:    vmovx.f16 s8, s12
-; CHECK-NEXT:    vmovx.f16 s5, s19
-; CHECK-NEXT:    vins.f16 s19, s8
-; CHECK-NEXT:    vmovx.f16 s8, s15
-; CHECK-NEXT:    vmovx.f16 s7, s9
-; CHECK-NEXT:    vins.f16 s14, s8
-; CHECK-NEXT:    vmovx.f16 s8, s10
-; CHECK-NEXT:    vins.f16 s4, s18
-; CHECK-NEXT:    vmov.f32 s20, s17
-; CHECK-NEXT:    vmovx.f16 s18, s18
-; CHECK-NEXT:    vins.f16 s9, s8
-; CHECK-NEXT:    vins.f16 s5, s13
-; CHECK-NEXT:    vins.f16 s20, s18
-; CHECK-NEXT:    vmov.f32 s17, s19
-; CHECK-NEXT:    vins.f16 s7, s11
-; CHECK-NEXT:    vmovx.f16 s13, s13
-; CHECK-NEXT:    vmov.f32 s21, s12
-; CHECK-NEXT:    vmov.f32 s18, s14
-; CHECK-NEXT:    vins.f16 s21, s13
-; CHECK-NEXT:    vmov.f32 s19, s9
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vadd.i16 q1, q4, q1
-; CHECK-NEXT:    vadd.i16 q1, q1, q5
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v16i16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LV-NEXT:    vmovx.f16 s6, s2
+; CHECK-LV-NEXT:    vmov.f32 s4, s1
+; CHECK-LV-NEXT:    vins.f16 s4, s6
+; CHECK-LV-NEXT:    vmovx.f16 s6, s9
+; CHECK-LV-NEXT:    vmov.f32 s5, s8
+; CHECK-LV-NEXT:    vmovx.f16 s7, s12
+; CHECK-LV-NEXT:    vins.f16 s5, s6
+; CHECK-LV-NEXT:    vmov.f32 s6, s11
+; CHECK-LV-NEXT:    vins.f16 s6, s7
+; CHECK-LV-NEXT:    vmovx.f16 s16, s15
+; CHECK-LV-NEXT:    vmov.f32 s7, s14
+; CHECK-LV-NEXT:    vmovx.f16 s17, s3
+; CHECK-LV-NEXT:    vins.f16 s7, s16
+; CHECK-LV-NEXT:    vmovx.f16 s16, s0
+; CHECK-LV-NEXT:    vins.f16 s16, s2
+; CHECK-LV-NEXT:    vmovx.f16 s2, s1
+; CHECK-LV-NEXT:    vins.f16 s0, s2
+; CHECK-LV-NEXT:    vmovx.f16 s2, s8
+; CHECK-LV-NEXT:    vins.f16 s3, s2
+; CHECK-LV-NEXT:    vmovx.f16 s2, s11
+; CHECK-LV-NEXT:    vmovx.f16 s8, s14
+; CHECK-LV-NEXT:    vmovx.f16 s18, s10
+; CHECK-LV-NEXT:    vmovx.f16 s19, s13
+; CHECK-LV-NEXT:    vins.f16 s10, s2
+; CHECK-LV-NEXT:    vins.f16 s13, s8
+; CHECK-LV-NEXT:    vmov.f32 s1, s3
+; CHECK-LV-NEXT:    vins.f16 s18, s12
+; CHECK-LV-NEXT:    vins.f16 s19, s15
+; CHECK-LV-NEXT:    vmov.f32 s3, s13
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-LV-NEXT:    vins.f16 s17, s9
+; CHECK-LV-NEXT:    vmov.f32 s2, s10
+; CHECK-LV-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-LV-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-LV-NEXT:    vmovx.f16 s6, s14
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-LV-NEXT:    vins.f16 s6, s8
+; CHECK-LV-NEXT:    vmov.f32 s22, s15
+; CHECK-LV-NEXT:    vmovx.f16 s8, s8
+; CHECK-LV-NEXT:    vins.f16 s22, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s10
+; CHECK-LV-NEXT:    vmovx.f16 s4, s16
+; CHECK-LV-NEXT:    vins.f16 s23, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s17
+; CHECK-LV-NEXT:    vins.f16 s16, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s12
+; CHECK-LV-NEXT:    vmovx.f16 s5, s19
+; CHECK-LV-NEXT:    vins.f16 s19, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s15
+; CHECK-LV-NEXT:    vmovx.f16 s7, s9
+; CHECK-LV-NEXT:    vins.f16 s14, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s10
+; CHECK-LV-NEXT:    vins.f16 s4, s18
+; CHECK-LV-NEXT:    vmov.f32 s20, s17
+; CHECK-LV-NEXT:    vmovx.f16 s18, s18
+; CHECK-LV-NEXT:    vins.f16 s9, s8
+; CHECK-LV-NEXT:    vins.f16 s5, s13
+; CHECK-LV-NEXT:    vins.f16 s20, s18
+; CHECK-LV-NEXT:    vmov.f32 s17, s19
+; CHECK-LV-NEXT:    vins.f16 s7, s11
+; CHECK-LV-NEXT:    vmovx.f16 s13, s13
+; CHECK-LV-NEXT:    vmov.f32 s21, s12
+; CHECK-LV-NEXT:    vmov.f32 s18, s14
+; CHECK-LV-NEXT:    vins.f16 s21, s13
+; CHECK-LV-NEXT:    vmov.f32 s19, s9
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vadd.i16 q1, q4, q1
+; CHECK-LV-NEXT:    vadd.i16 q1, q1, q5
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v16i16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s2
+; CHECK-LIS-NEXT:    vmov.f32 s4, s1
+; CHECK-LIS-NEXT:    vins.f16 s4, s6
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s9
+; CHECK-LIS-NEXT:    vmov.f32 s5, s8
+; CHECK-LIS-NEXT:    vmovx.f16 s7, s12
+; CHECK-LIS-NEXT:    vins.f16 s5, s6
+; CHECK-LIS-NEXT:    vmov.f32 s6, s11
+; CHECK-LIS-NEXT:    vins.f16 s6, s7
+; CHECK-LIS-NEXT:    vmovx.f16 s16, s15
+; CHECK-LIS-NEXT:    vmov.f32 s7, s14
+; CHECK-LIS-NEXT:    vmovx.f16 s17, s3
+; CHECK-LIS-NEXT:    vins.f16 s7, s16
+; CHECK-LIS-NEXT:    vmovx.f16 s16, s0
+; CHECK-LIS-NEXT:    vins.f16 s16, s2
+; CHECK-LIS-NEXT:    vmovx.f16 s2, s1
+; CHECK-LIS-NEXT:    vins.f16 s0, s2
+; CHECK-LIS-NEXT:    vmovx.f16 s2, s8
+; CHECK-LIS-NEXT:    vins.f16 s3, s2
+; CHECK-LIS-NEXT:    vmovx.f16 s2, s11
+; CHECK-LIS-NEXT:    vmovx.f16 s8, s14
+; CHECK-LIS-NEXT:    vmovx.f16 s18, s10
+; CHECK-LIS-NEXT:    vmovx.f16 s19, s13
+; CHECK-LIS-NEXT:    vins.f16 s10, s2
+; CHECK-LIS-NEXT:    vins.f16 s13, s8
+; CHECK-LIS-NEXT:    vmov.f32 s1, s3
+; CHECK-LIS-NEXT:    vins.f16 s18, s12
+; CHECK-LIS-NEXT:    vins.f16 s19, s15
+; CHECK-LIS-NEXT:    vmov.f32 s3, s13
+; CHECK-LIS-NEXT:    vins.f16 s17, s9
+; CHECK-LIS-NEXT:    vmov.f32 s2, s10
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-LIS-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-LIS-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #32]
+; CHECK-LIS-NEXT:    vmovx.f16 s10, s14
+; CHECK-LIS-NEXT:    vmov.f32 s22, s15
+; CHECK-LIS-NEXT:    vins.f16 s10, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s4
+; CHECK-LIS-NEXT:    vins.f16 s22, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s7
+; CHECK-LIS-NEXT:    vmov.f32 s23, s6
+; CHECK-LIS-NEXT:    vmovx.f16 s8, s16
+; CHECK-LIS-NEXT:    vins.f16 s23, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s17
+; CHECK-LIS-NEXT:    vins.f16 s16, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s12
+; CHECK-LIS-NEXT:    vmovx.f16 s9, s19
+; CHECK-LIS-NEXT:    vins.f16 s19, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s15
+; CHECK-LIS-NEXT:    vmovx.f16 s11, s5
+; CHECK-LIS-NEXT:    vins.f16 s14, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s6
+; CHECK-LIS-NEXT:    vins.f16 s8, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s17
+; CHECK-LIS-NEXT:    vmovx.f16 s18, s18
+; CHECK-LIS-NEXT:    vins.f16 s5, s4
+; CHECK-LIS-NEXT:    vins.f16 s9, s13
+; CHECK-LIS-NEXT:    vins.f16 s20, s18
+; CHECK-LIS-NEXT:    vmov.f32 s17, s19
+; CHECK-LIS-NEXT:    vins.f16 s11, s7
+; CHECK-LIS-NEXT:    vmovx.f16 s13, s13
+; CHECK-LIS-NEXT:    vmov.f32 s21, s12
+; CHECK-LIS-NEXT:    vmov.f32 s18, s14
+; CHECK-LIS-NEXT:    vins.f16 s21, s13
+; CHECK-LIS-NEXT:    vmov.f32 s19, s5
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vadd.i16 q1, q4, q2
+; CHECK-LIS-NEXT:    vadd.i16 q1, q1, q5
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <48 x i16>, ptr %src, align 4
   %s1 = shufflevector <48 x i16> %l1, <48 x i16> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
@@ -710,35 +935,65 @@ entry:
 ; i64
 
 define void @vld3_v2i64(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v2i64:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s12, s2
-; CHECK-NEXT:    vmov.f32 s13, s3
-; CHECK-NEXT:    vmov.f32 s2, s4
-; CHECK-NEXT:    vmov.f32 s3, s5
-; CHECK-NEXT:    vmov r0, r3, d5
-; CHECK-NEXT:    vmov r2, r4, d3
-; CHECK-NEXT:    vmov r6, r7, d0
-; CHECK-NEXT:    vmov r5, r8, d6
-; CHECK-NEXT:    vmov lr, r12, d1
-; CHECK-NEXT:    adds.w r0, r0, lr
-; CHECK-NEXT:    adc.w r3, r3, r12
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adc.w r2, r3, r4
-; CHECK-NEXT:    vmov r3, r4, d4
-; CHECK-NEXT:    adds r6, r6, r5
-; CHECK-NEXT:    adc.w r7, r7, r8
-; CHECK-NEXT:    adds r3, r3, r6
-; CHECK-NEXT:    adcs r7, r4
-; CHECK-NEXT:    vmov q0[2], q0[0], r3, r0
-; CHECK-NEXT:    vmov q0[3], q0[1], r7, r2
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-LV-LABEL: vld3_v2i64:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #32]
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s12, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s3
+; CHECK-LV-NEXT:    vmov.f32 s2, s4
+; CHECK-LV-NEXT:    vmov.f32 s3, s5
+; CHECK-LV-NEXT:    vmov r0, r3, d5
+; CHECK-LV-NEXT:    vmov r2, r4, d3
+; CHECK-LV-NEXT:    vmov r6, r7, d0
+; CHECK-LV-NEXT:    vmov r5, r8, d6
+; CHECK-LV-NEXT:    vmov lr, r12, d1
+; CHECK-LV-NEXT:    adds.w r0, r0, lr
+; CHECK-LV-NEXT:    adc.w r3, r3, r12
+; CHECK-LV-NEXT:    adds r0, r0, r2
+; CHECK-LV-NEXT:    adc.w r2, r3, r4
+; CHECK-LV-NEXT:    vmov r3, r4, d4
+; CHECK-LV-NEXT:    adds r6, r6, r5
+; CHECK-LV-NEXT:    adc.w r7, r7, r8
+; CHECK-LV-NEXT:    adds r3, r3, r6
+; CHECK-LV-NEXT:    adcs r7, r4
+; CHECK-LV-NEXT:    vmov q0[2], q0[0], r3, r0
+; CHECK-LV-NEXT:    vmov q0[3], q0[1], r7, r2
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LV-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+;
+; CHECK-LIS-LABEL: vld3_v2i64:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s12, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s8
+; CHECK-LIS-NEXT:    vmov.f32 s3, s9
+; CHECK-LIS-NEXT:    vmov r0, r3, d3
+; CHECK-LIS-NEXT:    vmov r2, r4, d5
+; CHECK-LIS-NEXT:    vmov r6, r7, d0
+; CHECK-LIS-NEXT:    vmov r5, r8, d6
+; CHECK-LIS-NEXT:    vmov lr, r12, d1
+; CHECK-LIS-NEXT:    adds.w r0, r0, lr
+; CHECK-LIS-NEXT:    adc.w r3, r3, r12
+; CHECK-LIS-NEXT:    adds r0, r0, r2
+; CHECK-LIS-NEXT:    adc.w r2, r3, r4
+; CHECK-LIS-NEXT:    vmov r3, r4, d2
+; CHECK-LIS-NEXT:    adds r6, r6, r5
+; CHECK-LIS-NEXT:    adc.w r7, r7, r8
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r4
+; CHECK-LIS-NEXT:    vmov q0[2], q0[0], r3, r0
+; CHECK-LIS-NEXT:    vmov q0[3], q0[1], r7, r2
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LIS-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
   %l1 = load <6 x i64>, ptr %src, align 4
   %s1 = shufflevector <6 x i64> %l1, <6 x i64> undef, <2 x i32> <i32 0, i32 3>
@@ -751,62 +1006,120 @@ entry:
 }
 
 define void @vld3_v4i64(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v4i64:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #48]
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #64]
-; CHECK-NEXT:    vmov.f32 s5, s3
-; CHECK-NEXT:    vmov.f32 s2, s12
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vmov r5, r4, d5
-; CHECK-NEXT:    vmov r3, r8, d7
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s24, s22
-; CHECK-NEXT:    vmov.f32 s25, s23
-; CHECK-NEXT:    vmov lr, r12, d1
-; CHECK-NEXT:    vmov.f32 s2, s12
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vmov r6, r7, d12
-; CHECK-NEXT:    adds.w r0, r5, lr
-; CHECK-NEXT:    adc.w r5, r4, r12
-; CHECK-NEXT:    adds.w lr, r0, r3
-; CHECK-NEXT:    vmov r4, r2, d10
-; CHECK-NEXT:    adc.w r12, r5, r8
-; CHECK-NEXT:    vmov r5, r0, d8
-; CHECK-NEXT:    adds r6, r6, r4
-; CHECK-NEXT:    adcs r2, r7
-; CHECK-NEXT:    adds r6, r6, r5
-; CHECK-NEXT:    adc.w r8, r2, r0
-; CHECK-NEXT:    vmov r7, r4, d1
-; CHECK-NEXT:    vmov r2, r5, d9
-; CHECK-NEXT:    vmov r3, r0, d0
-; CHECK-NEXT:    adds r2, r2, r7
-; CHECK-NEXT:    adc.w r7, r5, r4
-; CHECK-NEXT:    vmov r5, r4, d7
-; CHECK-NEXT:    adds r2, r2, r5
-; CHECK-NEXT:    adcs r7, r4
-; CHECK-NEXT:    vmov r5, r4, d2
-; CHECK-NEXT:    vmov q1[2], q1[0], r6, r2
-; CHECK-NEXT:    vmov q1[3], q1[1], r8, r7
-; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
-; CHECK-NEXT:    adds r3, r3, r5
-; CHECK-NEXT:    adcs r0, r4
-; CHECK-NEXT:    vmov r4, r5, d4
-; CHECK-NEXT:    adds r3, r3, r4
-; CHECK-NEXT:    vmov q0[2], q0[0], r3, lr
-; CHECK-NEXT:    adcs r0, r5
-; CHECK-NEXT:    vmov q0[3], q0[1], r0, r12
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-LV-LABEL: vld3_v4i64:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11, d12}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11, d12}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-LV-NEXT:    vldrw.u32 q5, [r0, #48]
+; CHECK-LV-NEXT:    vmov.f32 s4, s2
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #64]
+; CHECK-LV-NEXT:    vmov.f32 s5, s3
+; CHECK-LV-NEXT:    vmov.f32 s2, s12
+; CHECK-LV-NEXT:    vmov.f32 s3, s13
+; CHECK-LV-NEXT:    vmov r5, r4, d5
+; CHECK-LV-NEXT:    vmov r3, r8, d7
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LV-NEXT:    vmov.f32 s24, s22
+; CHECK-LV-NEXT:    vmov.f32 s25, s23
+; CHECK-LV-NEXT:    vmov lr, r12, d1
+; CHECK-LV-NEXT:    vmov.f32 s2, s12
+; CHECK-LV-NEXT:    vmov.f32 s3, s13
+; CHECK-LV-NEXT:    vmov r6, r7, d12
+; CHECK-LV-NEXT:    adds.w r0, r5, lr
+; CHECK-LV-NEXT:    adc.w r5, r4, r12
+; CHECK-LV-NEXT:    adds.w lr, r0, r3
+; CHECK-LV-NEXT:    vmov r4, r2, d10
+; CHECK-LV-NEXT:    adc.w r12, r5, r8
+; CHECK-LV-NEXT:    vmov r5, r0, d8
+; CHECK-LV-NEXT:    adds r6, r6, r4
+; CHECK-LV-NEXT:    adcs r2, r7
+; CHECK-LV-NEXT:    adds r6, r6, r5
+; CHECK-LV-NEXT:    adc.w r8, r2, r0
+; CHECK-LV-NEXT:    vmov r7, r4, d1
+; CHECK-LV-NEXT:    vmov r2, r5, d9
+; CHECK-LV-NEXT:    vmov r3, r0, d0
+; CHECK-LV-NEXT:    adds r2, r2, r7
+; CHECK-LV-NEXT:    adc.w r7, r5, r4
+; CHECK-LV-NEXT:    vmov r5, r4, d7
+; CHECK-LV-NEXT:    adds r2, r2, r5
+; CHECK-LV-NEXT:    adcs r7, r4
+; CHECK-LV-NEXT:    vmov r5, r4, d2
+; CHECK-LV-NEXT:    vmov q1[2], q1[0], r6, r2
+; CHECK-LV-NEXT:    vmov q1[3], q1[1], r8, r7
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-LV-NEXT:    adds r3, r3, r5
+; CHECK-LV-NEXT:    adcs r0, r4
+; CHECK-LV-NEXT:    vmov r4, r5, d4
+; CHECK-LV-NEXT:    adds r3, r3, r4
+; CHECK-LV-NEXT:    vmov q0[2], q0[0], r3, lr
+; CHECK-LV-NEXT:    adcs r0, r5
+; CHECK-LV-NEXT:    vmov q0[3], q0[1], r0, r12
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11, d12}
+; CHECK-LV-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+;
+; CHECK-LIS-LABEL: vld3_v4i64:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11, d12}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11, d12}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vldrw.u32 q5, [r0, #48]
+; CHECK-LIS-NEXT:    vmov.f32 s8, s2
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #64]
+; CHECK-LIS-NEXT:    vmov.f32 s9, s3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s12
+; CHECK-LIS-NEXT:    vmov.f32 s3, s13
+; CHECK-LIS-NEXT:    vmov r2, r3, d3
+; CHECK-LIS-NEXT:    vmov r4, r8, d7
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LIS-NEXT:    vmov.f32 s24, s22
+; CHECK-LIS-NEXT:    vmov.f32 s25, s23
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vmov lr, r12, d1
+; CHECK-LIS-NEXT:    vmov.f32 s2, s12
+; CHECK-LIS-NEXT:    vmov.f32 s3, s13
+; CHECK-LIS-NEXT:    vmov r6, r7, d12
+; CHECK-LIS-NEXT:    adds.w r0, r2, lr
+; CHECK-LIS-NEXT:    adc.w r2, r3, r12
+; CHECK-LIS-NEXT:    adds.w lr, r0, r4
+; CHECK-LIS-NEXT:    vmov r3, r5, d10
+; CHECK-LIS-NEXT:    adc.w r12, r2, r8
+; CHECK-LIS-NEXT:    vmov r2, r0, d8
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r5
+; CHECK-LIS-NEXT:    adds r2, r2, r3
+; CHECK-LIS-NEXT:    adc.w r8, r7, r0
+; CHECK-LIS-NEXT:    vmov r6, r5, d1
+; CHECK-LIS-NEXT:    vmov r3, r7, d3
+; CHECK-LIS-NEXT:    vmov r4, r0, d0
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r5
+; CHECK-LIS-NEXT:    vmov r6, r5, d7
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r5
+; CHECK-LIS-NEXT:    vmov r6, r5, d4
+; CHECK-LIS-NEXT:    adds r4, r4, r6
+; CHECK-LIS-NEXT:    adcs r0, r5
+; CHECK-LIS-NEXT:    vmov r5, r6, d2
+; CHECK-LIS-NEXT:    vmov q1[2], q1[0], r2, r3
+; CHECK-LIS-NEXT:    vmov q1[3], q1[1], r8, r7
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-LIS-NEXT:    adds r4, r4, r5
+; CHECK-LIS-NEXT:    vmov q0[2], q0[0], r4, lr
+; CHECK-LIS-NEXT:    adcs r0, r6
+; CHECK-LIS-NEXT:    vmov q0[3], q0[1], r0, r12
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11, d12}
+; CHECK-LIS-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
   %l1 = load <12 x i64>, ptr %src, align 4
   %s1 = shufflevector <12 x i64> %l1, <12 x i64> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
@@ -881,46 +1194,87 @@ entry:
 }
 
 define void @vld3_v8f32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v8f32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.f32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.f32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vadd.f32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vadd.f32 q1, q4, q1
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v8f32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vadd.f32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vadd.f32 q1, q4, q1
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v8f32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.f32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vadd.f32 q1, q3, q1
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <24 x float>, ptr %src, align 4
   %s1 = shufflevector <24 x float> %l1, <24 x float> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
@@ -933,80 +1287,155 @@ entry:
 }
 
 define void @vld3_v16f32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v16f32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #176]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.f32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.f32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #160]
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vadd.f32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
-; CHECK-NEXT:    vadd.f32 q1, q4, q1
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vmov.f32 s21, s8
-; CHECK-NEXT:    vmov.f32 s22, s11
-; CHECK-NEXT:    vmov.f32 s16, s12
-; CHECK-NEXT:    vmov.f32 s17, s15
-; CHECK-NEXT:    vmov.f32 s20, s13
-; CHECK-NEXT:    vmov.f32 s23, s26
-; CHECK-NEXT:    vmov.f32 s19, s25
-; CHECK-NEXT:    vadd.f32 q4, q4, q5
-; CHECK-NEXT:    vmov.f32 s8, s14
-; CHECK-NEXT:    vmov.f32 s10, s24
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #112]
-; CHECK-NEXT:    vmov.f32 s11, s27
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #128]
-; CHECK-NEXT:    vadd.f32 q2, q4, q2
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #96]
-; CHECK-NEXT:    vmov.f32 s25, s12
-; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s26, s15
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s30, s14
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vmov.f32 s24, s17
-; CHECK-NEXT:    vmov.f32 s27, s22
-; CHECK-NEXT:    vmov.f32 s28, s16
-; CHECK-NEXT:    vmov.f32 s29, s19
-; CHECK-NEXT:    vmov.f32 s31, s21
-; CHECK-NEXT:    vadd.f32 q6, q7, q6
-; CHECK-NEXT:    vmov.f32 s12, s18
-; CHECK-NEXT:    vmov.f32 s14, s20
-; CHECK-NEXT:    vmov.f32 s15, s23
-; CHECK-NEXT:    vadd.f32 q3, q6, q3
-; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v16f32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vadd.f32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LV-NEXT:    vadd.f32 q1, q4, q1
+; CHECK-LV-NEXT:    vmov.f32 s18, s10
+; CHECK-LV-NEXT:    vmov.f32 s21, s8
+; CHECK-LV-NEXT:    vmov.f32 s22, s11
+; CHECK-LV-NEXT:    vmov.f32 s16, s12
+; CHECK-LV-NEXT:    vmov.f32 s17, s15
+; CHECK-LV-NEXT:    vmov.f32 s20, s13
+; CHECK-LV-NEXT:    vmov.f32 s23, s26
+; CHECK-LV-NEXT:    vmov.f32 s19, s25
+; CHECK-LV-NEXT:    vadd.f32 q4, q4, q5
+; CHECK-LV-NEXT:    vmov.f32 s8, s14
+; CHECK-LV-NEXT:    vmov.f32 s10, s24
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LV-NEXT:    vmov.f32 s11, s27
+; CHECK-LV-NEXT:    vldrw.u32 q5, [r0, #128]
+; CHECK-LV-NEXT:    vadd.f32 q2, q4, q2
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LV-NEXT:    vmov.f32 s25, s12
+; CHECK-LV-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LV-NEXT:    vmov.f32 s26, s15
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s30, s14
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vmov.f32 s24, s17
+; CHECK-LV-NEXT:    vmov.f32 s27, s22
+; CHECK-LV-NEXT:    vmov.f32 s28, s16
+; CHECK-LV-NEXT:    vmov.f32 s29, s19
+; CHECK-LV-NEXT:    vmov.f32 s31, s21
+; CHECK-LV-NEXT:    vadd.f32 q6, q7, q6
+; CHECK-LV-NEXT:    vmov.f32 s12, s18
+; CHECK-LV-NEXT:    vmov.f32 s14, s20
+; CHECK-LV-NEXT:    vmov.f32 s15, s23
+; CHECK-LV-NEXT:    vadd.f32 q3, q6, q3
+; CHECK-LV-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v16f32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.f32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vadd.f32 q1, q3, q1
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LIS-NEXT:    vmov.f32 s18, s10
+; CHECK-LIS-NEXT:    vmov.f32 s21, s8
+; CHECK-LIS-NEXT:    vmov.f32 s22, s11
+; CHECK-LIS-NEXT:    vmov.f32 s16, s12
+; CHECK-LIS-NEXT:    vmov.f32 s17, s15
+; CHECK-LIS-NEXT:    vmov.f32 s20, s13
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s19, s25
+; CHECK-LIS-NEXT:    vmov.f32 s8, s14
+; CHECK-LIS-NEXT:    vadd.f32 q4, q4, q5
+; CHECK-LIS-NEXT:    vmov.f32 s10, s24
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LIS-NEXT:    vmov.f32 s11, s27
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #128]
+; CHECK-LIS-NEXT:    vadd.f32 q2, q4, q2
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LIS-NEXT:    vmov.f32 s21, s12
+; CHECK-LIS-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LIS-NEXT:    vmov.f32 s22, s15
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s30, s14
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vmov.f32 s20, s17
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s28, s16
+; CHECK-LIS-NEXT:    vmov.f32 s29, s19
+; CHECK-LIS-NEXT:    vmov.f32 s31, s25
+; CHECK-LIS-NEXT:    vadd.f32 q5, q7, q5
+; CHECK-LIS-NEXT:    vmov.f32 s12, s18
+; CHECK-LIS-NEXT:    vmov.f32 s14, s24
+; CHECK-LIS-NEXT:    vmov.f32 s15, s27
+; CHECK-LIS-NEXT:    vadd.f32 q3, q5, q3
+; CHECK-LIS-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <48 x float>, ptr %src, align 4
   %s1 = shufflevector <48 x float> %l1, <48 x float> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>

>From 5621deddf6b2b30e5e8427186c16dd12c73ca484 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 11 Sep 2023 19:51:13 +0100
Subject: [PATCH 2/4] [RISCV] Shrink vslideup's LMUL when lowering fixed
 insert_subvector

Similar to #65598, if we're using a vslideup to insert a fixed-length vector
into another vector, then given the minimum VLEN we can work out the minimum
number of registers the slideup will need to cross, and shrink the type being
operated on to reduce LMUL accordingly.

This is somewhat dependent on #65916, since this patch introduces a
subregister copy that triggers a crash with -early-live-intervals in one of
the tests.
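
As a concrete illustration, here is a minimal IR sketch of the kind of insert
this affects, modelled on the insert_nxv8i32_v2i32_2 test updated below (the
function name is illustrative, the intrinsic mangling follows that test file,
and the comments assume the usual VLEN >= 128 minimum for +v):

  define <vscale x 8 x i32> @sketch(<vscale x 8 x i32> %vec, ptr %svp) {
    %sv = load <2 x i32>, ptr %svp
    ; Elements 2 and 3 of the nxv8i32 (an m4 register group) both live in its
    ; first m1 register when VLEN >= 128, so the slideup only needs LMUL=1.
    %v = call <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32> %vec, <2 x i32> %sv, i64 2)
    ret <vscale x 8 x i32> %v
  }
  declare <vscale x 8 x i32> @llvm.vector.insert.v2i32.nxv8i32(<vscale x 8 x i32>, <2 x i32>, i64)

In the updated checks for that test, the vslideup now runs under an e32, m1
vsetvli instead of e32, m4.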
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  18 +
 .../rvv/fixed-vectors-insert-subvector.ll     |  45 ++-
 .../rvv/fixed-vectors-strided-load-combine.ll |  80 ++---
 .../CodeGen/RISCV/rvv/fpclamptosat_vec.ll     | 330 ++++++++----------
 4 files changed, 229 insertions(+), 244 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0dd03076cc05b36..71570f7f9d700f9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8606,6 +8606,18 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
       ContainerVT = getContainerForFixedLengthVector(VecVT);
       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
     }
+
+    // Shrink down Vec so we're performing the slideup on a smaller LMUL.
+    unsigned LastIdx = OrigIdx + SubVecVT.getVectorNumElements() - 1;
+    MVT OrigContainerVT = ContainerVT;
+    SDValue OrigVec = Vec;
+    if (auto ShrunkVT =
+            getSmallestVTForIndex(ContainerVT, LastIdx, DL, DAG, Subtarget)) {
+      ContainerVT = *ShrunkVT;
+      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerVT, Vec,
+                        DAG.getVectorIdxConstant(0, DL));
+    }
+
     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
                          DAG.getUNDEF(ContainerVT), SubVec,
                          DAG.getConstant(0, DL, XLenVT));
@@ -8636,6 +8648,12 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
                            SlideupAmt, Mask, VL, Policy);
     }
 
+    // If we performed the slideup on a smaller LMUL, insert the result back
+    // into the rest of the vector.
+    if (ContainerVT != OrigContainerVT)
+      SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
+                           SubVec, DAG.getVectorIdxConstant(0, DL));
+
     if (VecVT.isFixedLengthVector())
       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
     return DAG.getBitcast(Op.getValueType(), SubVec);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 1d6a45ed36f335c..6a9212ed309a8ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -14,7 +14,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, ptr %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, ptr %svp
@@ -27,7 +27,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_2(<vscale x 8 x i32> %vec, ptr %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 2
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, ptr %svp
@@ -40,7 +40,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, ptr %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 6
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, ptr %svp
@@ -51,22 +51,19 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, ptr %
 define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
 ; LMULMAX2-LABEL: insert_nxv8i32_v8i32_0:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT:    vle32.v v12, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX2-NEXT:    vmv.v.v v8, v12
+; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; LMULMAX2-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_0:
 ; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vle32.v v12, (a0)
-; LMULMAX1-NEXT:    addi a0, a0, 16
-; LMULMAX1-NEXT:    vle32.v v16, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vmv.v.v v8, v12
-; LMULMAX1-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v16, 4
+; LMULMAX1-NEXT:    vle32.v v12, (a1)
+; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
+; LMULMAX1-NEXT:    vle32.v v8, (a0)
+; LMULMAX1-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; LMULMAX1-NEXT:    vslideup.vi v8, v12, 4
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, ptr %svp
   %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
@@ -84,14 +81,14 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, ptr %
 ;
 ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vle32.v v12, (a1)
+; LMULMAX1-NEXT:    vle32.v v12, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 12, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v16, 8
+; LMULMAX1-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v12, 12
+; LMULMAX1-NEXT:    vslideup.vi v8, v16, 12
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, ptr %svp
   %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
@@ -166,7 +163,7 @@ define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
 ; LMULMAX2-NEXT:    vle32.v v8, (a1)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vle32.v v10, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; LMULMAX2-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; LMULMAX2-NEXT:    vmv.v.v v10, v8
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
@@ -197,7 +194,7 @@ define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
 ; LMULMAX2-NEXT:    vle32.v v8, (a1)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vle32.v v10, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; LMULMAX2-NEXT:    vslideup.vi v10, v8, 2
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
@@ -508,9 +505,9 @@ define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, <vscale x 16 x i64>* %o
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vle64.v v16, (a1)
-; CHECK-NEXT:    vsetivli zero, 6, e64, m8, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v16, 4
+; CHECK-NEXT:    vle64.v v12, (a1)
+; CHECK-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v12, 4
 ; CHECK-NEXT:    vs8r.v v8, (a2)
 ; CHECK-NEXT:    ret
   %sv0 = load <2 x i64>, ptr %psv0
@@ -539,7 +536,7 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, <vscale x 16 x i64>* %out) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v16, v8, 2
 ; CHECK-NEXT:    vs8r.v v16, (a1)
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index f52ba6f51d5c897..805557905117add 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -27,13 +27,13 @@ define void @widen_3xv4i16(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a0, 8
-; CHECK-NEXT:    vle16.v v10, (a2)
+; CHECK-NEXT:    vle16.v v9, (a2)
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vle16.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 8
+; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    vse16.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
@@ -75,17 +75,17 @@ define void @widen_4xv4i16_unaligned(ptr %x, ptr %z) {
 ; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NO-MISALIGN-NEXT:    vle8.v v8, (a0)
 ; CHECK-NO-MISALIGN-NEXT:    addi a2, a0, 8
-; CHECK-NO-MISALIGN-NEXT:    vle8.v v10, (a2)
+; CHECK-NO-MISALIGN-NEXT:    vle8.v v9, (a2)
 ; CHECK-NO-MISALIGN-NEXT:    addi a2, a0, 16
-; CHECK-NO-MISALIGN-NEXT:    vle8.v v12, (a2)
+; CHECK-NO-MISALIGN-NEXT:    vle8.v v10, (a2)
 ; CHECK-NO-MISALIGN-NEXT:    addi a0, a0, 24
-; CHECK-NO-MISALIGN-NEXT:    vle8.v v14, (a0)
-; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NO-MISALIGN-NEXT:    vle8.v v12, (a0)
+; CHECK-NO-MISALIGN-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
+; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v12, 8
+; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v14, 12
+; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v12, 12
 ; CHECK-NO-MISALIGN-NEXT:    vse16.v v8, (a1)
 ; CHECK-NO-MISALIGN-NEXT:    ret
 ;
@@ -188,17 +188,17 @@ define void @strided_constant_mismatch_4xv4i16(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a0, 2
-; CHECK-NEXT:    vle16.v v10, (a2)
+; CHECK-NEXT:    vle16.v v9, (a2)
 ; CHECK-NEXT:    addi a2, a0, 6
-; CHECK-NEXT:    vle16.v v12, (a2)
+; CHECK-NEXT:    vle16.v v10, (a2)
 ; CHECK-NEXT:    addi a0, a0, 8
-; CHECK-NEXT:    vle16.v v14, (a0)
-; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 8
+; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v14, 12
+; CHECK-NEXT:    vslideup.vi v8, v12, 12
 ; CHECK-NEXT:    vse16.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
@@ -258,17 +258,17 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    vle16.v v10, (a0)
+; RV32-NEXT:    vle16.v v9, (a0)
 ; RV32-NEXT:    add a0, a0, a4
-; RV32-NEXT:    vle16.v v12, (a0)
+; RV32-NEXT:    vle16.v v10, (a0)
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    vle16.v v14, (a0)
-; RV32-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 4
+; RV32-NEXT:    vle16.v v12, (a0)
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 4
 ; RV32-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 8
+; RV32-NEXT:    vslideup.vi v8, v10, 8
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v14, 12
+; RV32-NEXT:    vslideup.vi v8, v12, 12
 ; RV32-NEXT:    vse16.v v8, (a1)
 ; RV32-NEXT:    ret
 ;
@@ -277,17 +277,17 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    add a0, a0, a2
-; RV64-NEXT:    vle16.v v10, (a0)
+; RV64-NEXT:    vle16.v v9, (a0)
 ; RV64-NEXT:    add a0, a0, a3
-; RV64-NEXT:    vle16.v v12, (a0)
+; RV64-NEXT:    vle16.v v10, (a0)
 ; RV64-NEXT:    add a0, a0, a2
-; RV64-NEXT:    vle16.v v14, (a0)
-; RV64-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v10, 4
+; RV64-NEXT:    vle16.v v12, (a0)
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; RV64-NEXT:    vslideup.vi v8, v9, 4
 ; RV64-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v12, 8
+; RV64-NEXT:    vslideup.vi v8, v10, 8
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT:    vslideup.vi v8, v14, 12
+; RV64-NEXT:    vslideup.vi v8, v12, 12
 ; RV64-NEXT:    vse16.v v8, (a1)
 ; RV64-NEXT:    ret
 ;
@@ -296,17 +296,17 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
 ; ZVE64F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVE64F-NEXT:    vle16.v v8, (a0)
 ; ZVE64F-NEXT:    add a0, a0, a2
-; ZVE64F-NEXT:    vle16.v v10, (a0)
+; ZVE64F-NEXT:    vle16.v v9, (a0)
 ; ZVE64F-NEXT:    add a0, a0, a3
-; ZVE64F-NEXT:    vle16.v v12, (a0)
+; ZVE64F-NEXT:    vle16.v v10, (a0)
 ; ZVE64F-NEXT:    add a0, a0, a2
-; ZVE64F-NEXT:    vle16.v v14, (a0)
-; ZVE64F-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; ZVE64F-NEXT:    vslideup.vi v8, v10, 4
+; ZVE64F-NEXT:    vle16.v v12, (a0)
+; ZVE64F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; ZVE64F-NEXT:    vslideup.vi v8, v9, 4
 ; ZVE64F-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; ZVE64F-NEXT:    vslideup.vi v8, v12, 8
+; ZVE64F-NEXT:    vslideup.vi v8, v10, 8
 ; ZVE64F-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; ZVE64F-NEXT:    vslideup.vi v8, v14, 12
+; ZVE64F-NEXT:    vslideup.vi v8, v12, 12
 ; ZVE64F-NEXT:    vse16.v v8, (a1)
 ; ZVE64F-NEXT:    ret
   %a = load <4 x i16>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index 31e7e7be76c89b1..f598118c18aff9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -460,54 +460,49 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2 at plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2 at plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    lui a0, 524288
@@ -632,54 +627,49 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2 at plt
-; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2 at plt
+; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -813,54 +803,49 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2 at plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2 at plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -1454,8 +1439,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -1466,16 +1451,16 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -1484,7 +1469,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -1737,8 +1722,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
@@ -1749,16 +1734,16 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -1767,7 +1752,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -2040,8 +2025,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -2052,16 +2037,16 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -2070,7 +2055,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -3796,54 +3781,49 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2 at plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2 at plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    lui a0, 524288
@@ -3966,54 +3946,49 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2 at plt
-; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2 at plt
+; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -4146,54 +4121,49 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2 at plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2 at plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -4775,8 +4745,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -4787,16 +4757,16 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -4805,7 +4775,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5054,8 +5024,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
@@ -5066,16 +5036,16 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5084,7 +5054,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5356,8 +5326,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -5368,16 +5338,16 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5386,7 +5356,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2 at plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload

>From 21f5a5251b8423b716f695c498e28a5ad03e3e70 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 12 Sep 2023 14:35:22 +0100
Subject: [PATCH 3/4] [RISCV] Reduce LMUL when index is known when lowering
 insert_vector_elt

Continuing on from #65997, if the index of insert_vector_elt is a constant
then we can work out the minimum number of registers that will be needed for
the slideup and choose a smaller type to operate on. This reduces the LMUL not
just for the slideup but also for the scalar insert.
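
As an illustration, here is a minimal sketch of the pattern this targets (the
function name is illustrative rather than one of the exact tests below, and
the comments assume the usual VLEN >= 128 minimum for +v):

  define <vscale x 8 x i32> @sketch(<vscale x 8 x i32> %v, i32 %elt) {
    ; The constant index 2 is known to land in the first m1 register of the
    ; m4 group, so both the scalar insert (vmv.s.x) and the vslideup can be
    ; issued at LMUL=1 rather than LMUL=4.
    %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 2
    ret <vscale x 8 x i32> %r
  }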
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  25 ++
 .../RISCV/rvv/fixed-vectors-insert-i1.ll      |   2 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-insert.ll |  14 +-
 .../RISCV/rvv/fixed-vectors-masked-gather.ll  | 292 +++++++++---------
 llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll   |  34 +-
 llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll   |   6 +-
 .../CodeGen/RISCV/rvv/insertelt-int-rv32.ll   |  54 ++--
 .../CodeGen/RISCV/rvv/insertelt-int-rv64.ll   |  46 +--
 .../RISCV/rvv/vsetvli-insert-crossbb.ll       |   5 +-
 9 files changed, 252 insertions(+), 226 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 71570f7f9d700f9..d7e61bae47cc641 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7401,6 +7401,19 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
   }
 
+  MVT OrigContainerVT = ContainerVT;
+  SDValue OrigVec = Vec;
+  // If we know the index we're going to insert at, we can shrink down Vec so
+  // we're performing the vslide1down on a smaller LMUL.
+  if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
+    if (auto ShrunkVT = getSmallestVTForIndex(ContainerVT, CIdx->getZExtValue(),
+                                              DL, DAG, Subtarget)) {
+      ContainerVT = *ShrunkVT;
+      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerVT, Vec,
+                        DAG.getVectorIdxConstant(0, DL));
+    }
+  }
+
   MVT XLenVT = Subtarget.getXLenVT();
 
   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
@@ -7425,6 +7438,10 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
     if (isNullConstant(Idx)) {
       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
+
+      if (ContainerVT != OrigContainerVT)
+        Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
+                          Vec, DAG.getVectorIdxConstant(0, DL));
       if (!VecVT.isFixedLengthVector())
         return Vec;
       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
@@ -7457,6 +7474,10 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
       // Bitcast back to the right container type.
       ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
 
+      if (ContainerVT != OrigContainerVT)
+        ValInVec =
+            DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
+                        ValInVec, DAG.getVectorIdxConstant(0, DL));
       if (!VecVT.isFixedLengthVector())
         return ValInVec;
       return convertFromScalableVector(VecVT, ValInVec, DAG, Subtarget);
@@ -7487,6 +7508,10 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
     Policy = RISCVII::TAIL_AGNOSTIC;
   SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
                                 Idx, Mask, InsertVL, Policy);
+
+  if (ContainerVT != OrigContainerVT)
+    Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
+                          Slideup, DAG.getVectorIdxConstant(0, DL));
   if (!VecVT.isFixedLengthVector())
     return Slideup;
   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
index b3cbad3d9e6b1d7..f7737784d4ca57e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
@@ -108,7 +108,7 @@ define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 2, e8, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e8, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 373a96356a207e2..cbcca9d2696f4ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -40,7 +40,7 @@ define <32 x i32> @insertelt_v32i32_0(<32 x i32> %a, i32 %y) {
 ; CHECK-LABEL: insertelt_v32i32_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %b = insertelement <32 x i32> %a, i32 %y, i32 0
@@ -54,7 +54,7 @@ define <32 x i32> @insertelt_v32i32_4(<32 x i32> %a, i32 %y) {
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 5, e32, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 4
 ; CHECK-NEXT:    ret
   %b = insertelement <32 x i32> %a, i32 %y, i32 4
@@ -92,7 +92,7 @@ define <64 x i32> @insertelt_v64i32_0(<64 x i32> %a, i32 %y) {
 ; CHECK-LABEL: insertelt_v64i32_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %b = insertelement <64 x i32> %a, i32 %y, i32 0
@@ -390,7 +390,7 @@ define <8 x i64> @insertelt_v8i64_0(<8 x i64> %a, ptr %x) {
 ; CHECK-LABEL: insertelt_v8i64_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 8, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %b = insertelement <8 x i64> %a, i64 -1, i32 0
@@ -468,7 +468,7 @@ define <8 x i64> @insertelt_c6_v8i64_0(<8 x i64> %a, ptr %x) {
 ; CHECK-LABEL: insertelt_c6_v8i64_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 6
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 8, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %b = insertelement <8 x i64> %a, i64 6, i32 0
@@ -550,9 +550,9 @@ define void @insertelt_c6_v8i64_0_add(ptr %x, ptr %y) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    li a2, 6
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 8, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a2
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vle64.v v12, (a1)
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    vse64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index f3af177ac0ff27e..24626ffe1fa0450 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -2426,14 +2426,14 @@ define <8 x i32> @mgather_v8i32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i32> %passthr
 ; RV64ZVE32F-NEXT:  .LBB34_9: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB34_2
 ; RV64ZVE32F-NEXT:  .LBB34_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
@@ -2441,7 +2441,7 @@ define <8 x i32> @mgather_v8i32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i32> %passthr
 ; RV64ZVE32F-NEXT:  .LBB34_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
@@ -2449,7 +2449,7 @@ define <8 x i32> @mgather_v8i32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x i32> %passthr
 ; RV64ZVE32F-NEXT:  .LBB34_12: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a2, 24(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -2520,7 +2520,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB35_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -2533,9 +2533,9 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB35_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -2581,7 +2581,7 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB35_6
@@ -2593,9 +2593,9 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB35_7
 ; RV64ZVE32F-NEXT:  .LBB35_14: # %cond.load10
@@ -2670,7 +2670,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB36_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -2683,9 +2683,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB36_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -2731,7 +2731,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB36_6
@@ -2743,9 +2743,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB36_7
 ; RV64ZVE32F-NEXT:  .LBB36_14: # %cond.load10
@@ -2822,7 +2822,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB37_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -2836,9 +2836,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB37_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -2886,7 +2886,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB37_6
@@ -2900,7 +2900,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB37_7
@@ -2981,7 +2981,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB38_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -2994,9 +2994,9 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB38_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -3042,7 +3042,7 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB38_6
@@ -3054,9 +3054,9 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB38_7
 ; RV64ZVE32F-NEXT:  .LBB38_14: # %cond.load10
@@ -3132,7 +3132,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB39_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -3145,9 +3145,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB39_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -3193,7 +3193,7 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB39_6
@@ -3205,9 +3205,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB39_7
 ; RV64ZVE32F-NEXT:  .LBB39_14: # %cond.load10
@@ -3287,7 +3287,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
 ; RV64ZVE32F-NEXT:    lw a3, 0(a3)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a3
 ; RV64ZVE32F-NEXT:  .LBB40_2: # %else
 ; RV64ZVE32F-NEXT:    andi a3, a2, 2
@@ -3301,9 +3301,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
 ; RV64ZVE32F-NEXT:    lw a3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a3
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB40_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -3351,7 +3351,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    lw a3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a3, a2, 8
 ; RV64ZVE32F-NEXT:    beqz a3, .LBB40_6
@@ -3364,9 +3364,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
 ; RV64ZVE32F-NEXT:    lw a3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a3
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a3, a2, 16
 ; RV64ZVE32F-NEXT:    beqz a3, .LBB40_7
 ; RV64ZVE32F-NEXT:  .LBB40_14: # %cond.load10
@@ -3440,7 +3440,7 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB41_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
@@ -3457,7 +3457,7 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB41_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
@@ -3501,9 +3501,9 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 2
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB41_6
 ; RV64ZVE32F-NEXT:  .LBB41_13: # %cond.load7
@@ -3514,7 +3514,7 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB41_7
@@ -8229,14 +8229,14 @@ define <8 x float> @mgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x float> %pas
 ; RV64ZVE32F-NEXT:  .LBB73_9: # %cond.load
 ; RV64ZVE32F-NEXT:    ld a2, 0(a0)
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB73_2
 ; RV64ZVE32F-NEXT:  .LBB73_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
@@ -8244,7 +8244,7 @@ define <8 x float> @mgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x float> %pas
 ; RV64ZVE32F-NEXT:  .LBB73_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
@@ -8252,7 +8252,7 @@ define <8 x float> @mgather_v8f32(<8 x ptr> %ptrs, <8 x i1> %m, <8 x float> %pas
 ; RV64ZVE32F-NEXT:  .LBB73_12: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a2, 24(a0)
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
@@ -8323,7 +8323,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB74_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -8336,9 +8336,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB74_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -8384,7 +8384,7 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB74_6
@@ -8396,9 +8396,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB74_7
 ; RV64ZVE32F-NEXT:  .LBB74_14: # %cond.load10
@@ -8473,7 +8473,7 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB75_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -8486,9 +8486,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB75_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -8534,7 +8534,7 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB75_6
@@ -8546,9 +8546,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB75_7
 ; RV64ZVE32F-NEXT:  .LBB75_14: # %cond.load10
@@ -8625,7 +8625,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB76_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -8639,9 +8639,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB76_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -8689,7 +8689,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB76_6
@@ -8703,7 +8703,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB76_7
@@ -8784,7 +8784,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB77_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -8797,9 +8797,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB77_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -8845,7 +8845,7 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB77_6
@@ -8857,9 +8857,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB77_7
 ; RV64ZVE32F-NEXT:  .LBB77_14: # %cond.load10
@@ -8935,7 +8935,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB78_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -8948,9 +8948,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB78_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -8996,7 +8996,7 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB78_6
@@ -9008,9 +9008,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB78_7
 ; RV64ZVE32F-NEXT:  .LBB78_14: # %cond.load10
@@ -9090,7 +9090,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    slli a3, a3, 2
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB79_2: # %else
 ; RV64ZVE32F-NEXT:    andi a3, a2, 2
@@ -9104,9 +9104,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 1
 ; RV64ZVE32F-NEXT:  .LBB79_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
@@ -9154,7 +9154,7 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 2
 ; RV64ZVE32F-NEXT:    andi a3, a2, 8
 ; RV64ZVE32F-NEXT:    beqz a3, .LBB79_6
@@ -9167,9 +9167,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT:    add a3, a0, a3
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a3, a2, 16
 ; RV64ZVE32F-NEXT:    beqz a3, .LBB79_7
 ; RV64ZVE32F-NEXT:  .LBB79_14: # %cond.load10
@@ -9243,7 +9243,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB80_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
@@ -9260,7 +9260,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB80_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
@@ -9304,9 +9304,9 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vfmv.s.f v14, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 2
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB80_6
 ; RV64ZVE32F-NEXT:  .LBB80_13: # %cond.load7
@@ -9317,7 +9317,7 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v8, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB80_7
@@ -12382,7 +12382,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB98_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -12395,7 +12395,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 1
 ; RV64ZVE32F-NEXT:  .LBB98_4: # %else2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
@@ -12417,9 +12417,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 4
+; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:  .LBB98_8: # %else11
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
@@ -12433,7 +12433,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e8, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 5
 ; RV64ZVE32F-NEXT:  .LBB98_10: # %else14
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -12456,9 +12456,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 10, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 9
+; RV64ZVE32F-NEXT:    vmv.s.x v13, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 10, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v13, 9
 ; RV64ZVE32F-NEXT:  .LBB98_15: # %else26
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v13, v12, 4
@@ -12472,7 +12472,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 11, e8, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 11, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 10
 ; RV64ZVE32F-NEXT:  .LBB98_17: # %else29
 ; RV64ZVE32F-NEXT:    slli a2, a1, 52
@@ -12484,9 +12484,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 12, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 11
+; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 12, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 11
 ; RV64ZVE32F-NEXT:  .LBB98_19: # %else32
 ; RV64ZVE32F-NEXT:    vsetivli zero, 16, e8, m2, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 51
@@ -12497,9 +12497,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 13, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 12
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 13, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 12
 ; RV64ZVE32F-NEXT:  .LBB98_21: # %else35
 ; RV64ZVE32F-NEXT:    slli a2, a1, 50
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_23
@@ -12510,9 +12510,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 14, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 13
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 14, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 13
 ; RV64ZVE32F-NEXT:  .LBB98_23: # %else38
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 49
@@ -12661,7 +12661,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e8, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB98_6
@@ -12672,9 +12672,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 3
+; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    bnez a2, .LBB98_7
 ; RV64ZVE32F-NEXT:    j .LBB98_8
@@ -12684,7 +12684,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e8, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 6
 ; RV64ZVE32F-NEXT:    andi a2, a1, 128
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB98_12
@@ -12695,9 +12695,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 7
+; RV64ZVE32F-NEXT:    vmv.s.x v13, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v13, 7
 ; RV64ZVE32F-NEXT:    andi a2, a1, 256
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB98_13
 ; RV64ZVE32F-NEXT:  .LBB98_53: # %cond.load22
@@ -12707,9 +12707,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 9, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 8
+; RV64ZVE32F-NEXT:    vmv.s.x v13, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 9, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v13, 8
 ; RV64ZVE32F-NEXT:    andi a2, a1, 512
 ; RV64ZVE32F-NEXT:    bnez a2, .LBB98_14
 ; RV64ZVE32F-NEXT:    j .LBB98_15
@@ -12719,7 +12719,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 15, e8, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 15, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 14
 ; RV64ZVE32F-NEXT:    slli a2, a1, 48
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_25
@@ -12730,9 +12730,9 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 16, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 15
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 16, e8, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v9, 15
 ; RV64ZVE32F-NEXT:    slli a2, a1, 47
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_26
 ; RV64ZVE32F-NEXT:  .LBB98_56: # %cond.load46
diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
index 4bd9f7befa52a1c..060e99691cb1381 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
@@ -109,7 +109,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %
 define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
@@ -119,7 +119,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %el
 define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv8f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -143,7 +143,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %
 define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv16f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
@@ -153,7 +153,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half
 define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv16f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -177,7 +177,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, hal
 define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv32f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
@@ -187,7 +187,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half
 define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, half %elt) {
 ; CHECK-LABEL: insertelt_nxv32f16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -279,7 +279,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, floa
 define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv4f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
@@ -289,7 +289,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float
 define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv4f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -313,7 +313,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, floa
 define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv8f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
@@ -323,7 +323,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float
 define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv8f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -347,7 +347,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, floa
 define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv16f32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
@@ -357,7 +357,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, flo
 define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, float %elt) {
 ; CHECK-LABEL: insertelt_nxv16f32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -415,7 +415,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, do
 define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv2f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
@@ -449,7 +449,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, do
 define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv4f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
@@ -459,7 +459,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, doub
 define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv4f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -483,7 +483,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, do
 define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv8f64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x double> %v, double %elt, i32 0
@@ -493,7 +493,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, doub
 define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, double %elt) {
 ; CHECK-LABEL: insertelt_nxv8f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v16, fa0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
index a7bd15f2a7b330a..911072d9571ff81 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
@@ -149,7 +149,7 @@ define <vscale x 16 x i1> @insertelt_nxv16i1(<vscale x 16 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetivli zero, 3, e8, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 1
@@ -184,7 +184,7 @@ define <vscale x 32 x i1> @insertelt_nxv32i1(<vscale x 32 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetivli zero, 3, e8, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 1
@@ -219,7 +219,7 @@ define <vscale x 64 x i1> @insertelt_nxv64i1(<vscale x 64 x i1> %x, i1 %elt) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vmv.s.x v16, a0
-; CHECK-NEXT:    vsetivli zero, 3, e8, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 3, e8, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 2
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
index 39f94eab2aa6606..883906772874e72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
@@ -141,7 +141,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext
 define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
@@ -151,7 +151,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext
 define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signe
 define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
@@ -185,7 +185,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext
 define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -209,7 +209,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signe
 define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
@@ -219,7 +219,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext
 define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -345,7 +345,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 sign
 define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
@@ -355,7 +355,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signex
 define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -379,7 +379,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 sign
 define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
@@ -389,7 +389,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 sig
 define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -413,7 +413,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 s
 define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
@@ -423,7 +423,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 sig
 define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -515,7 +515,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %elt
 define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
@@ -525,7 +525,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 %elt)
 define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -549,7 +549,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %elt
 define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
@@ -559,7 +559,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 %elt)
 define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -583,7 +583,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %elt
 define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
@@ -593,7 +593,7 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 %el
 define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -655,7 +655,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -693,7 +693,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -704,10 +704,10 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt)
 define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v12, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v12, v12, a1
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
@@ -731,7 +731,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -742,10 +742,10 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt)
 define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
 ; CHECK-NEXT:    vslide1down.vx v16, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v16, v16, a1
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
@@ -771,7 +771,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0_c10(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_0_c10:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 10
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 10, i32 0
@@ -807,7 +807,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0_cn1(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_0_cn1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
index 1dd00197bbbb044..40767eefdd814cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
@@ -141,7 +141,7 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext
 define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
@@ -151,7 +151,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext
 define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -175,7 +175,7 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signe
 define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
@@ -185,7 +185,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext
 define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -209,7 +209,7 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signe
 define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
@@ -219,7 +219,7 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext
 define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e8, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -345,7 +345,7 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 sign
 define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
@@ -355,7 +355,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signex
 define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -379,7 +379,7 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 sign
 define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
@@ -389,7 +389,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 sig
 define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -413,7 +413,7 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 s
 define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
@@ -423,7 +423,7 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 sig
 define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -515,7 +515,7 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 sign
 define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
@@ -525,7 +525,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 signex
 define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
@@ -549,7 +549,7 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 sign
 define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
@@ -559,7 +559,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 signex
 define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -583,7 +583,7 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 sign
 define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
@@ -593,7 +593,7 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 sig
 define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e32, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
@@ -653,7 +653,7 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
@@ -689,7 +689,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
@@ -699,7 +699,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt)
 define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 3
 ; CHECK-NEXT:    ret
@@ -725,7 +725,7 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
@@ -735,7 +735,7 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt)
 define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 3
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 73f651225da64d3..44a396ee29a8ab3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -810,9 +810,10 @@ for.end:                                          ; preds = %for.body
 define <vscale x 4 x i32> @cross_block_mutate(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
 ; CHECK-LABEL: cross_block_mutate:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli a0, 6, e32, m2, tu, ma
+; CHECK-NEXT:    vsetivli a0, 6, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 6, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
                                          <vscale x 4 x i1> %mask) {

>From 0ec8328a5f772860f9430448de7e14c2c15c2562 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 12 Sep 2023 14:47:52 +0100
Subject: [PATCH 4/4] Update comment

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d7e61bae47cc641..d41e5368eac84d6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7403,8 +7403,8 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
 
   MVT OrigContainerVT = ContainerVT;
   SDValue OrigVec = Vec;
-  // If we know the index we're going to insert at, we can shrink down Vec so
-  // we're performing the vslide1down on a smaller LMUL.
+  // If we know the index we're going to insert at, we can shrink Vec so that
+  // we're performing the scalar inserts and slideup on a smaller LMUL.
   if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
     if (auto ShrunkVT = getSmallestVTForIndex(ContainerVT, CIdx->getZExtValue(),
                                               DL, DAG, Subtarget)) {

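For anyone skimming the test churn, the comment change above is the whole point of the patch: when the insert index is a known constant, the scalar insert and the slideup can run at the LMUL that actually contains that index rather than the LMUL of the full vector. As a minimal sketch, here is the nxv16i8 case from insertelt-int-rv64.ll written out as a standalone function; the ret line and the constant index 3 are reconstructed from the neighbouring tests and the vslideup.vi operand, not copied verbatim from this excerpt, and the before/after instruction sequences are taken directly from the CHECK lines in the hunk above:

  define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
    %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
    ret <vscale x 16 x i8> %r
  }

  ; Before this patch the tail-undisturbed vsetivli kept the whole m2 group active:
  ;   vsetivli    zero, 4, e8, m2, tu, ma
  ;   vmv.s.x     v10, a0
  ;   vslideup.vi v8, v10, 3
  ; With the index known at compile time, index 3 always lies in the first
  ; <vscale x 8 x i8> register of the group, so the insert and slideup only
  ; need m1 while the register operands stay the same:
  ;   vsetivli    zero, 4, e8, m1, tu, ma
  ;   vmv.s.x     v10, a0
  ;   vslideup.vi v8, v10, 3

The same reasoning explains the nxv4i64/nxv8i64 cases, where the index cannot fit in a single register and the vsetvli drops to m2 instead of m1.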

