[llvm] [RISCV] Shrink vslideup's LMUL when lowering fixed insert_subvector (PR #65997)

Luke Lau via llvm-commits llvm-commits@lists.llvm.org
Wed Sep 13 07:48:12 PDT 2023


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/65997:

From 199e6df31bd90dbd00b84d8cda04469106a667d5 Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Wed, 13 Sep 2023 14:59:44 +0100
Subject: [PATCH 1/2] [TwoAddressInstruction] Update LiveIntervals after
 INSERT_SUBREG with undef read

Update LiveIntervals after rewriting:
  %reg = INSERT_SUBREG undef %reg, %subreg, subidx
to:
  undef %reg:subidx = COPY %subreg

D113044 implemented this for the non-undef case. With an undef read, the
lanes outside the inserted subregister carry no live value before the
instruction, so their def is removed outright instead of being merged into
a preceding segment.
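
For context, here is the patched fixup loop read as a whole, with
explanatory comments added. The variables mi, LI, LIS, and TRI come from
the surrounding code in TwoAddressInstructionPass::runOnMachineFunction;
this is a commented restatement of the hunk below, not a separate
implementation:

  // After the INSERT_SUBREG has been rewritten to a subregister COPY, fix
  // up the subranges of the destination's live interval. Lanes covered by
  // the copied subregister keep their def; for every other subrange the
  // def either merges into the value live before the instruction
  // (non-undef read) or, with an undef read, is simply deleted.
  LaneBitmask LaneMask =
      TRI->getSubRegIndexLaneMask(mi->getOperand(0).getSubReg());
  // Query at the register slot so we land on the segment the def starts.
  SlotIndex Idx = LIS->getInstructionIndex(*mi).getRegSlot();
  for (LiveInterval::SubRange &S : LI.subranges()) {
    if ((S.LaneMask & LaneMask).none()) {
      LiveRange::iterator DefSeg = S.FindSegmentContaining(Idx);
      if (mi->getOperand(0).isUndef()) {
        // No value was live in these lanes before the instruction, so the
        // def here is spurious; drop it.
        S.removeValNo(DefSeg->valno);
      } else {
        // Merge the def into the value that reached the instruction.
        LiveRange::iterator UseSeg = std::prev(DefSeg);
        S.MergeValueNumberInto(DefSeg->valno, UseSeg->valno);
      }
    }
  }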
---
 .../lib/CodeGen/TwoAddressInstructionPass.cpp |   12 +-
 llvm/test/CodeGen/Thumb2/mve-fmath.ll         |   52 +-
 llvm/test/CodeGen/Thumb2/mve-shuffle.ll       |  189 ++-
 llvm/test/CodeGen/Thumb2/mve-vld3.ll          | 1269 +++++++++++------
 4 files changed, 1019 insertions(+), 503 deletions(-)

diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index 45f61262faf9391..41b888592cc18b6 100644
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1868,12 +1868,16 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
             // %reg.subidx.
             LaneBitmask LaneMask =
                 TRI->getSubRegIndexLaneMask(mi->getOperand(0).getSubReg());
-            SlotIndex Idx = LIS->getInstructionIndex(*mi);
+            SlotIndex Idx = LIS->getInstructionIndex(*mi).getRegSlot();
             for (auto &S : LI.subranges()) {
               if ((S.LaneMask & LaneMask).none()) {
-                LiveRange::iterator UseSeg = S.FindSegmentContaining(Idx);
-                LiveRange::iterator DefSeg = std::next(UseSeg);
-                S.MergeValueNumberInto(DefSeg->valno, UseSeg->valno);
+                LiveRange::iterator DefSeg = S.FindSegmentContaining(Idx);
+                if (mi->getOperand(0).isUndef()) {
+                  S.removeValNo(DefSeg->valno);
+                } else {
+                  LiveRange::iterator UseSeg = std::prev(DefSeg);
+                  S.MergeValueNumberInto(DefSeg->valno, UseSeg->valno);
+                }
               }
             }
 
diff --git a/llvm/test/CodeGen/Thumb2/mve-fmath.ll b/llvm/test/CodeGen/Thumb2/mve-fmath.ll
index 844da3baa42ba8d..3a477f987cee61f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fmath.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fmath.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS
 
 define arm_aapcs_vfpcc <4 x float> @sqrt_float32_t(<4 x float> %src) {
 ; CHECK-LABEL: sqrt_float32_t:
@@ -1085,21 +1087,37 @@ entry:
 }
 
 define arm_aapcs_vfpcc <2 x double> @copysign_float64_t(<2 x double> %src1, <2 x double> %src2) {
-; CHECK-LABEL: copysign_float64_t:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r7, lr}
-; CHECK-NEXT:    push {r7, lr}
-; CHECK-NEXT:    vmov r0, r1, d3
-; CHECK-NEXT:    vmov r0, lr, d2
-; CHECK-NEXT:    vmov r0, r3, d1
-; CHECK-NEXT:    vmov r12, r2, d0
-; CHECK-NEXT:    lsrs r1, r1, #31
-; CHECK-NEXT:    bfi r3, r1, #31, #1
-; CHECK-NEXT:    lsr.w r1, lr, #31
-; CHECK-NEXT:    bfi r2, r1, #31, #1
-; CHECK-NEXT:    vmov d1, r0, r3
-; CHECK-NEXT:    vmov d0, r12, r2
-; CHECK-NEXT:    pop {r7, pc}
+; CHECK-LV-LABEL: copysign_float64_t:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r7, lr}
+; CHECK-LV-NEXT:    push {r7, lr}
+; CHECK-LV-NEXT:    vmov r0, r1, d3
+; CHECK-LV-NEXT:    vmov r0, lr, d2
+; CHECK-LV-NEXT:    vmov r0, r3, d1
+; CHECK-LV-NEXT:    vmov r12, r2, d0
+; CHECK-LV-NEXT:    lsrs r1, r1, #31
+; CHECK-LV-NEXT:    bfi r3, r1, #31, #1
+; CHECK-LV-NEXT:    lsr.w r1, lr, #31
+; CHECK-LV-NEXT:    bfi r2, r1, #31, #1
+; CHECK-LV-NEXT:    vmov d1, r0, r3
+; CHECK-LV-NEXT:    vmov d0, r12, r2
+; CHECK-LV-NEXT:    pop {r7, pc}
+;
+; CHECK-LIS-LABEL: copysign_float64_t:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, lr}
+; CHECK-LIS-NEXT:    push {r4, lr}
+; CHECK-LIS-NEXT:    vmov r0, r12, d3
+; CHECK-LIS-NEXT:    vmov r0, lr, d2
+; CHECK-LIS-NEXT:    vmov r4, r3, d1
+; CHECK-LIS-NEXT:    vmov r1, r2, d0
+; CHECK-LIS-NEXT:    lsr.w r0, r12, #31
+; CHECK-LIS-NEXT:    bfi r3, r0, #31, #1
+; CHECK-LIS-NEXT:    lsr.w r0, lr, #31
+; CHECK-LIS-NEXT:    bfi r2, r0, #31, #1
+; CHECK-LIS-NEXT:    vmov d1, r4, r3
+; CHECK-LIS-NEXT:    vmov d0, r1, r2
+; CHECK-LIS-NEXT:    pop {r4, pc}
 entry:
   %0 = call fast <2 x double> @llvm.copysign.v2f64(<2 x double> %src1, <2 x double> %src2)
   ret <2 x double> %0
diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index 93a058828765e2b..6e644c58687faea 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECKFP
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LV,CHECKFP
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LIS,CHECKFP
 
 define arm_aapcs_vfpcc <4 x i32> @shuffle1_i32(<4 x i32> %src) {
 ; CHECK-LABEL: shuffle1_i32:
@@ -221,18 +223,31 @@ entry:
 }
 
 define arm_aapcs_vfpcc <8 x i16> @shuffle3_i16(<8 x i16> %src) {
-; CHECK-LABEL: shuffle3_i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vmovx.f16 s2, s5
-; CHECK-NEXT:    vmovx.f16 s0, s4
-; CHECK-NEXT:    vins.f16 s5, s4
-; CHECK-NEXT:    vins.f16 s2, s0
-; CHECK-NEXT:    vmov.f32 s3, s5
-; CHECK-NEXT:    vmovx.f16 s1, s7
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vins.f16 s1, s7
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: shuffle3_i16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    vmov q1, q0
+; CHECK-LV-NEXT:    vmovx.f16 s2, s5
+; CHECK-LV-NEXT:    vmovx.f16 s0, s4
+; CHECK-LV-NEXT:    vins.f16 s5, s4
+; CHECK-LV-NEXT:    vins.f16 s2, s0
+; CHECK-LV-NEXT:    vmov.f32 s3, s5
+; CHECK-LV-NEXT:    vmovx.f16 s1, s7
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vins.f16 s1, s7
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: shuffle3_i16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    vmovx.f16 s5, s3
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s1
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s0
+; CHECK-LIS-NEXT:    vins.f16 s1, s0
+; CHECK-LIS-NEXT:    vins.f16 s6, s4
+; CHECK-LIS-NEXT:    vins.f16 s5, s3
+; CHECK-LIS-NEXT:    vmov.f32 s7, s1
+; CHECK-LIS-NEXT:    vmov.f32 s4, s2
+; CHECK-LIS-NEXT:    vmov q0, q1
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 7, i32 6, i32 3, i32 1, i32 2, i32 0>
   ret <8 x i16> %out
@@ -476,42 +491,79 @@ entry:
 }
 
 define arm_aapcs_vfpcc <16 x i8> @shuffle3_i8(<16 x i8> %src) {
-; CHECK-LABEL: shuffle3_i8:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vmov.u8 r0, q0[4]
-; CHECK-NEXT:    vmov.8 q0[0], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[5]
-; CHECK-NEXT:    vmov.8 q0[1], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[15]
-; CHECK-NEXT:    vmov.8 q0[2], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[7]
-; CHECK-NEXT:    vmov.8 q0[3], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[14]
-; CHECK-NEXT:    vmov.8 q0[4], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[9]
-; CHECK-NEXT:    vmov.8 q0[5], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[6]
-; CHECK-NEXT:    vmov.8 q0[6], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[3]
-; CHECK-NEXT:    vmov.8 q0[7], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[10]
-; CHECK-NEXT:    vmov.8 q0[8], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[12]
-; CHECK-NEXT:    vmov.8 q0[9], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[1]
-; CHECK-NEXT:    vmov.8 q0[10], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[13]
-; CHECK-NEXT:    vmov.8 q0[11], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[2]
-; CHECK-NEXT:    vmov.8 q0[12], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[8]
-; CHECK-NEXT:    vmov.8 q0[13], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[0]
-; CHECK-NEXT:    vmov.8 q0[14], r0
-; CHECK-NEXT:    vmov.u8 r0, q1[11]
-; CHECK-NEXT:    vmov.8 q0[15], r0
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: shuffle3_i8:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    vmov q1, q0
+; CHECK-LV-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-LV-NEXT:    vmov.8 q0[0], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-LV-NEXT:    vmov.8 q0[1], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-LV-NEXT:    vmov.8 q0[2], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-LV-NEXT:    vmov.8 q0[3], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-LV-NEXT:    vmov.8 q0[4], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-LV-NEXT:    vmov.8 q0[5], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-LV-NEXT:    vmov.8 q0[6], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-LV-NEXT:    vmov.8 q0[7], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-LV-NEXT:    vmov.8 q0[8], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-LV-NEXT:    vmov.8 q0[9], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-LV-NEXT:    vmov.8 q0[10], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-LV-NEXT:    vmov.8 q0[11], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-LV-NEXT:    vmov.8 q0[12], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-LV-NEXT:    vmov.8 q0[13], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-LV-NEXT:    vmov.8 q0[14], r0
+; CHECK-LV-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-LV-NEXT:    vmov.8 q0[15], r0
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: shuffle3_i8:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-LIS-NEXT:    vmov.8 q1[0], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-LIS-NEXT:    vmov.8 q1[1], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-LIS-NEXT:    vmov.8 q1[2], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-LIS-NEXT:    vmov.8 q1[3], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-LIS-NEXT:    vmov.8 q1[4], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-LIS-NEXT:    vmov.8 q1[5], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-LIS-NEXT:    vmov.8 q1[6], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-LIS-NEXT:    vmov.8 q1[7], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-LIS-NEXT:    vmov.8 q1[8], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-LIS-NEXT:    vmov.8 q1[9], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-LIS-NEXT:    vmov.8 q1[10], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-LIS-NEXT:    vmov.8 q1[11], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-LIS-NEXT:    vmov.8 q1[12], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-LIS-NEXT:    vmov.8 q1[13], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-LIS-NEXT:    vmov.8 q1[14], r0
+; CHECK-LIS-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-LIS-NEXT:    vmov.8 q1[15], r0
+; CHECK-LIS-NEXT:    vmov q0, q1
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 15, i32 7, i32 14, i32 9, i32 6, i32 3, i32 10, i32 12, i32 1, i32 13, i32 2, i32 8, i32 0, i32 11>
   ret <16 x i8> %out
@@ -1143,18 +1195,31 @@ entry:
 }
 
 define arm_aapcs_vfpcc <8 x half> @shuffle3_f16(<8 x half> %src) {
-; CHECK-LABEL: shuffle3_f16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov q1, q0
-; CHECK-NEXT:    vmovx.f16 s2, s5
-; CHECK-NEXT:    vmovx.f16 s0, s4
-; CHECK-NEXT:    vins.f16 s5, s4
-; CHECK-NEXT:    vins.f16 s2, s0
-; CHECK-NEXT:    vmov.f32 s3, s5
-; CHECK-NEXT:    vmovx.f16 s1, s7
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vins.f16 s1, s7
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: shuffle3_f16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    vmov q1, q0
+; CHECK-LV-NEXT:    vmovx.f16 s2, s5
+; CHECK-LV-NEXT:    vmovx.f16 s0, s4
+; CHECK-LV-NEXT:    vins.f16 s5, s4
+; CHECK-LV-NEXT:    vins.f16 s2, s0
+; CHECK-LV-NEXT:    vmov.f32 s3, s5
+; CHECK-LV-NEXT:    vmovx.f16 s1, s7
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vins.f16 s1, s7
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: shuffle3_f16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    vmovx.f16 s5, s3
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s1
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s0
+; CHECK-LIS-NEXT:    vins.f16 s1, s0
+; CHECK-LIS-NEXT:    vins.f16 s6, s4
+; CHECK-LIS-NEXT:    vins.f16 s5, s3
+; CHECK-LIS-NEXT:    vmov.f32 s7, s1
+; CHECK-LIS-NEXT:    vmov.f32 s4, s2
+; CHECK-LIS-NEXT:    vmov q0, q1
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 4, i32 5, i32 7, i32 6, i32 3, i32 1, i32 2, i32 0>
   ret <8 x half> %out
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 4895eabb71ec060..8a94e571e983665 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,CHECK-LV
+; RUN: llc -early-live-intervals -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,CHECK-LIS
 
 ; i32
 
@@ -67,46 +68,87 @@ entry:
 }
 
 define void @vld3_v8i32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v8i32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.i32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.i32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vadd.i32 q1, q4, q1
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v8i32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vadd.i32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vadd.i32 q1, q4, q1
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v8i32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.i32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vadd.i32 q1, q3, q1
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <24 x i32>, ptr %src, align 4
   %s1 = shufflevector <24 x i32> %l1, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
@@ -119,80 +161,155 @@ entry:
 }
 
 define void @vld3_v16i32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v16i32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #176]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.i32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.i32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #160]
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
-; CHECK-NEXT:    vadd.i32 q1, q4, q1
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vmov.f32 s21, s8
-; CHECK-NEXT:    vmov.f32 s22, s11
-; CHECK-NEXT:    vmov.f32 s16, s12
-; CHECK-NEXT:    vmov.f32 s17, s15
-; CHECK-NEXT:    vmov.f32 s20, s13
-; CHECK-NEXT:    vmov.f32 s23, s26
-; CHECK-NEXT:    vmov.f32 s19, s25
-; CHECK-NEXT:    vadd.i32 q4, q4, q5
-; CHECK-NEXT:    vmov.f32 s8, s14
-; CHECK-NEXT:    vmov.f32 s10, s24
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #112]
-; CHECK-NEXT:    vmov.f32 s11, s27
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #128]
-; CHECK-NEXT:    vadd.i32 q2, q4, q2
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #96]
-; CHECK-NEXT:    vmov.f32 s25, s12
-; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s26, s15
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s30, s14
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vmov.f32 s24, s17
-; CHECK-NEXT:    vmov.f32 s27, s22
-; CHECK-NEXT:    vmov.f32 s28, s16
-; CHECK-NEXT:    vmov.f32 s29, s19
-; CHECK-NEXT:    vmov.f32 s31, s21
-; CHECK-NEXT:    vadd.i32 q6, q7, q6
-; CHECK-NEXT:    vmov.f32 s12, s18
-; CHECK-NEXT:    vmov.f32 s14, s20
-; CHECK-NEXT:    vmov.f32 s15, s23
-; CHECK-NEXT:    vadd.i32 q3, q6, q3
-; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v16i32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vadd.i32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LV-NEXT:    vadd.i32 q1, q4, q1
+; CHECK-LV-NEXT:    vmov.f32 s18, s10
+; CHECK-LV-NEXT:    vmov.f32 s21, s8
+; CHECK-LV-NEXT:    vmov.f32 s22, s11
+; CHECK-LV-NEXT:    vmov.f32 s16, s12
+; CHECK-LV-NEXT:    vmov.f32 s17, s15
+; CHECK-LV-NEXT:    vmov.f32 s20, s13
+; CHECK-LV-NEXT:    vmov.f32 s23, s26
+; CHECK-LV-NEXT:    vmov.f32 s19, s25
+; CHECK-LV-NEXT:    vadd.i32 q4, q4, q5
+; CHECK-LV-NEXT:    vmov.f32 s8, s14
+; CHECK-LV-NEXT:    vmov.f32 s10, s24
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LV-NEXT:    vmov.f32 s11, s27
+; CHECK-LV-NEXT:    vldrw.u32 q5, [r0, #128]
+; CHECK-LV-NEXT:    vadd.i32 q2, q4, q2
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LV-NEXT:    vmov.f32 s25, s12
+; CHECK-LV-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LV-NEXT:    vmov.f32 s26, s15
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s30, s14
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vmov.f32 s24, s17
+; CHECK-LV-NEXT:    vmov.f32 s27, s22
+; CHECK-LV-NEXT:    vmov.f32 s28, s16
+; CHECK-LV-NEXT:    vmov.f32 s29, s19
+; CHECK-LV-NEXT:    vmov.f32 s31, s21
+; CHECK-LV-NEXT:    vadd.i32 q6, q7, q6
+; CHECK-LV-NEXT:    vmov.f32 s12, s18
+; CHECK-LV-NEXT:    vmov.f32 s14, s20
+; CHECK-LV-NEXT:    vmov.f32 s15, s23
+; CHECK-LV-NEXT:    vadd.i32 q3, q6, q3
+; CHECK-LV-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v16i32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.i32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vadd.i32 q1, q3, q1
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LIS-NEXT:    vmov.f32 s18, s10
+; CHECK-LIS-NEXT:    vmov.f32 s21, s8
+; CHECK-LIS-NEXT:    vmov.f32 s22, s11
+; CHECK-LIS-NEXT:    vmov.f32 s16, s12
+; CHECK-LIS-NEXT:    vmov.f32 s17, s15
+; CHECK-LIS-NEXT:    vmov.f32 s20, s13
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s19, s25
+; CHECK-LIS-NEXT:    vmov.f32 s8, s14
+; CHECK-LIS-NEXT:    vadd.i32 q4, q4, q5
+; CHECK-LIS-NEXT:    vmov.f32 s10, s24
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LIS-NEXT:    vmov.f32 s11, s27
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #128]
+; CHECK-LIS-NEXT:    vadd.i32 q2, q4, q2
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LIS-NEXT:    vmov.f32 s21, s12
+; CHECK-LIS-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LIS-NEXT:    vmov.f32 s22, s15
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s30, s14
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vmov.f32 s20, s17
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s28, s16
+; CHECK-LIS-NEXT:    vmov.f32 s29, s19
+; CHECK-LIS-NEXT:    vmov.f32 s31, s25
+; CHECK-LIS-NEXT:    vadd.i32 q5, q7, q5
+; CHECK-LIS-NEXT:    vmov.f32 s12, s18
+; CHECK-LIS-NEXT:    vmov.f32 s14, s24
+; CHECK-LIS-NEXT:    vmov.f32 s15, s27
+; CHECK-LIS-NEXT:    vadd.i32 q3, q5, q3
+; CHECK-LIS-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <48 x i32>, ptr %src, align 4
   %s1 = shufflevector <48 x i32> %l1, <48 x i32> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
@@ -247,32 +364,59 @@ entry:
 }
 
 define void @vld3_v4i16(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v4i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, r5, r6, lr}
-; CHECK-NEXT:    push {r4, r5, r6, lr}
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrh.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.u16 r5, q0[6]
-; CHECK-NEXT:    vmov.u16 r6, q0[0]
-; CHECK-NEXT:    vmov r0, r3, d2
-; CHECK-NEXT:    vmov.u16 lr, q0[2]
-; CHECK-NEXT:    vmov r2, r4, d3
-; CHECK-NEXT:    vmov q1[2], q1[0], r6, r5
-; CHECK-NEXT:    vmov.u16 r5, q0[7]
-; CHECK-NEXT:    vmov.u16 r6, q0[1]
-; CHECK-NEXT:    vmov q2[2], q2[0], r6, r5
-; CHECK-NEXT:    vmov.u16 r5, q0[3]
-; CHECK-NEXT:    vmov.u16 r6, q0[4]
-; CHECK-NEXT:    vmov q1[3], q1[1], r5, r3
-; CHECK-NEXT:    vmov q2[3], q2[1], r6, r2
-; CHECK-NEXT:    vmov.u16 r12, q0[5]
-; CHECK-NEXT:    vadd.i32 q0, q1, q2
-; CHECK-NEXT:    vmov q1[2], q1[0], lr, r0
-; CHECK-NEXT:    vmov q1[3], q1[1], r12, r4
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vstrh.32 q0, [r1]
-; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-LV-LABEL: vld3_v4i16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-LV-NEXT:    push {r4, r5, r6, lr}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LV-NEXT:    vldrh.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.u16 r5, q0[6]
+; CHECK-LV-NEXT:    vmov.u16 r6, q0[0]
+; CHECK-LV-NEXT:    vmov r0, r3, d2
+; CHECK-LV-NEXT:    vmov.u16 lr, q0[2]
+; CHECK-LV-NEXT:    vmov r2, r4, d3
+; CHECK-LV-NEXT:    vmov q1[2], q1[0], r6, r5
+; CHECK-LV-NEXT:    vmov.u16 r5, q0[7]
+; CHECK-LV-NEXT:    vmov.u16 r6, q0[1]
+; CHECK-LV-NEXT:    vmov q2[2], q2[0], r6, r5
+; CHECK-LV-NEXT:    vmov.u16 r5, q0[3]
+; CHECK-LV-NEXT:    vmov.u16 r6, q0[4]
+; CHECK-LV-NEXT:    vmov q1[3], q1[1], r5, r3
+; CHECK-LV-NEXT:    vmov q2[3], q2[1], r6, r2
+; CHECK-LV-NEXT:    vmov.u16 r12, q0[5]
+; CHECK-LV-NEXT:    vadd.i32 q0, q1, q2
+; CHECK-LV-NEXT:    vmov q1[2], q1[0], lr, r0
+; CHECK-LV-NEXT:    vmov q1[3], q1[1], r12, r4
+; CHECK-LV-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-LV-NEXT:    vstrh.32 q0, [r1]
+; CHECK-LV-NEXT:    pop {r4, r5, r6, pc}
+;
+; CHECK-LIS-LABEL: vld3_v4i16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-LIS-NEXT:    push {r4, r5, r6, lr}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LIS-NEXT:    vldrh.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.u16 r5, q0[6]
+; CHECK-LIS-NEXT:    vmov.u16 r6, q0[0]
+; CHECK-LIS-NEXT:    vmov r0, r2, d2
+; CHECK-LIS-NEXT:    vmov.u16 r12, q0[2]
+; CHECK-LIS-NEXT:    vmov r3, r4, d3
+; CHECK-LIS-NEXT:    vmov q1[2], q1[0], r6, r5
+; CHECK-LIS-NEXT:    vmov.u16 r5, q0[7]
+; CHECK-LIS-NEXT:    vmov.u16 r6, q0[1]
+; CHECK-LIS-NEXT:    vmov q2[2], q2[0], r6, r5
+; CHECK-LIS-NEXT:    vmov.u16 r5, q0[3]
+; CHECK-LIS-NEXT:    vmov.u16 r6, q0[4]
+; CHECK-LIS-NEXT:    vmov q1[3], q1[1], r5, r2
+; CHECK-LIS-NEXT:    vmov q2[3], q2[1], r6, r3
+; CHECK-LIS-NEXT:    vmov.u16 lr, q0[5]
+; CHECK-LIS-NEXT:    vadd.i32 q0, q1, q2
+; CHECK-LIS-NEXT:    vmov q1[2], q1[0], r12, r0
+; CHECK-LIS-NEXT:    vmov q1[3], q1[1], lr, r4
+; CHECK-LIS-NEXT:    vadd.i32 q0, q0, q1
+; CHECK-LIS-NEXT:    vstrh.32 q0, [r1]
+; CHECK-LIS-NEXT:    pop {r4, r5, r6, pc}
 entry:
   %l1 = load <12 x i16>, ptr %src, align 4
   %s1 = shufflevector <12 x i16> %l1, <12 x i16> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
@@ -340,86 +484,167 @@ entry:
 }
 
 define void @vld3_v16i16(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v16i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmovx.f16 s6, s2
-; CHECK-NEXT:    vmov.f32 s4, s1
-; CHECK-NEXT:    vins.f16 s4, s6
-; CHECK-NEXT:    vmovx.f16 s6, s9
-; CHECK-NEXT:    vmov.f32 s5, s8
-; CHECK-NEXT:    vmovx.f16 s7, s12
-; CHECK-NEXT:    vins.f16 s5, s6
-; CHECK-NEXT:    vmov.f32 s6, s11
-; CHECK-NEXT:    vins.f16 s6, s7
-; CHECK-NEXT:    vmovx.f16 s16, s15
-; CHECK-NEXT:    vmov.f32 s7, s14
-; CHECK-NEXT:    vmovx.f16 s17, s3
-; CHECK-NEXT:    vins.f16 s7, s16
-; CHECK-NEXT:    vmovx.f16 s16, s0
-; CHECK-NEXT:    vins.f16 s16, s2
-; CHECK-NEXT:    vmovx.f16 s2, s1
-; CHECK-NEXT:    vins.f16 s0, s2
-; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vins.f16 s3, s2
-; CHECK-NEXT:    vmovx.f16 s2, s11
-; CHECK-NEXT:    vmovx.f16 s8, s14
-; CHECK-NEXT:    vmovx.f16 s18, s10
-; CHECK-NEXT:    vmovx.f16 s19, s13
-; CHECK-NEXT:    vins.f16 s10, s2
-; CHECK-NEXT:    vins.f16 s13, s8
-; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s18, s12
-; CHECK-NEXT:    vins.f16 s19, s15
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
-; CHECK-NEXT:    vins.f16 s17, s9
-; CHECK-NEXT:    vmov.f32 s2, s10
-; CHECK-NEXT:    vadd.i16 q0, q0, q4
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT:    vadd.i16 q0, q0, q1
-; CHECK-NEXT:    vmovx.f16 s6, s14
-; CHECK-NEXT:    vldrw.u32 q4, [r0]
-; CHECK-NEXT:    vins.f16 s6, s8
-; CHECK-NEXT:    vmov.f32 s22, s15
-; CHECK-NEXT:    vmovx.f16 s8, s8
-; CHECK-NEXT:    vins.f16 s22, s8
-; CHECK-NEXT:    vmovx.f16 s8, s11
-; CHECK-NEXT:    vmov.f32 s23, s10
-; CHECK-NEXT:    vmovx.f16 s4, s16
-; CHECK-NEXT:    vins.f16 s23, s8
-; CHECK-NEXT:    vmovx.f16 s8, s17
-; CHECK-NEXT:    vins.f16 s16, s8
-; CHECK-NEXT:    vmovx.f16 s8, s12
-; CHECK-NEXT:    vmovx.f16 s5, s19
-; CHECK-NEXT:    vins.f16 s19, s8
-; CHECK-NEXT:    vmovx.f16 s8, s15
-; CHECK-NEXT:    vmovx.f16 s7, s9
-; CHECK-NEXT:    vins.f16 s14, s8
-; CHECK-NEXT:    vmovx.f16 s8, s10
-; CHECK-NEXT:    vins.f16 s4, s18
-; CHECK-NEXT:    vmov.f32 s20, s17
-; CHECK-NEXT:    vmovx.f16 s18, s18
-; CHECK-NEXT:    vins.f16 s9, s8
-; CHECK-NEXT:    vins.f16 s5, s13
-; CHECK-NEXT:    vins.f16 s20, s18
-; CHECK-NEXT:    vmov.f32 s17, s19
-; CHECK-NEXT:    vins.f16 s7, s11
-; CHECK-NEXT:    vmovx.f16 s13, s13
-; CHECK-NEXT:    vmov.f32 s21, s12
-; CHECK-NEXT:    vmov.f32 s18, s14
-; CHECK-NEXT:    vins.f16 s21, s13
-; CHECK-NEXT:    vmov.f32 s19, s9
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vadd.i16 q1, q4, q1
-; CHECK-NEXT:    vadd.i16 q1, q1, q5
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v16i16:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LV-NEXT:    vmovx.f16 s6, s2
+; CHECK-LV-NEXT:    vmov.f32 s4, s1
+; CHECK-LV-NEXT:    vins.f16 s4, s6
+; CHECK-LV-NEXT:    vmovx.f16 s6, s9
+; CHECK-LV-NEXT:    vmov.f32 s5, s8
+; CHECK-LV-NEXT:    vmovx.f16 s7, s12
+; CHECK-LV-NEXT:    vins.f16 s5, s6
+; CHECK-LV-NEXT:    vmov.f32 s6, s11
+; CHECK-LV-NEXT:    vins.f16 s6, s7
+; CHECK-LV-NEXT:    vmovx.f16 s16, s15
+; CHECK-LV-NEXT:    vmov.f32 s7, s14
+; CHECK-LV-NEXT:    vmovx.f16 s17, s3
+; CHECK-LV-NEXT:    vins.f16 s7, s16
+; CHECK-LV-NEXT:    vmovx.f16 s16, s0
+; CHECK-LV-NEXT:    vins.f16 s16, s2
+; CHECK-LV-NEXT:    vmovx.f16 s2, s1
+; CHECK-LV-NEXT:    vins.f16 s0, s2
+; CHECK-LV-NEXT:    vmovx.f16 s2, s8
+; CHECK-LV-NEXT:    vins.f16 s3, s2
+; CHECK-LV-NEXT:    vmovx.f16 s2, s11
+; CHECK-LV-NEXT:    vmovx.f16 s8, s14
+; CHECK-LV-NEXT:    vmovx.f16 s18, s10
+; CHECK-LV-NEXT:    vmovx.f16 s19, s13
+; CHECK-LV-NEXT:    vins.f16 s10, s2
+; CHECK-LV-NEXT:    vins.f16 s13, s8
+; CHECK-LV-NEXT:    vmov.f32 s1, s3
+; CHECK-LV-NEXT:    vins.f16 s18, s12
+; CHECK-LV-NEXT:    vins.f16 s19, s15
+; CHECK-LV-NEXT:    vmov.f32 s3, s13
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-LV-NEXT:    vins.f16 s17, s9
+; CHECK-LV-NEXT:    vmov.f32 s2, s10
+; CHECK-LV-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-LV-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-LV-NEXT:    vmovx.f16 s6, s14
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-LV-NEXT:    vins.f16 s6, s8
+; CHECK-LV-NEXT:    vmov.f32 s22, s15
+; CHECK-LV-NEXT:    vmovx.f16 s8, s8
+; CHECK-LV-NEXT:    vins.f16 s22, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s10
+; CHECK-LV-NEXT:    vmovx.f16 s4, s16
+; CHECK-LV-NEXT:    vins.f16 s23, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s17
+; CHECK-LV-NEXT:    vins.f16 s16, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s12
+; CHECK-LV-NEXT:    vmovx.f16 s5, s19
+; CHECK-LV-NEXT:    vins.f16 s19, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s15
+; CHECK-LV-NEXT:    vmovx.f16 s7, s9
+; CHECK-LV-NEXT:    vins.f16 s14, s8
+; CHECK-LV-NEXT:    vmovx.f16 s8, s10
+; CHECK-LV-NEXT:    vins.f16 s4, s18
+; CHECK-LV-NEXT:    vmov.f32 s20, s17
+; CHECK-LV-NEXT:    vmovx.f16 s18, s18
+; CHECK-LV-NEXT:    vins.f16 s9, s8
+; CHECK-LV-NEXT:    vins.f16 s5, s13
+; CHECK-LV-NEXT:    vins.f16 s20, s18
+; CHECK-LV-NEXT:    vmov.f32 s17, s19
+; CHECK-LV-NEXT:    vins.f16 s7, s11
+; CHECK-LV-NEXT:    vmovx.f16 s13, s13
+; CHECK-LV-NEXT:    vmov.f32 s21, s12
+; CHECK-LV-NEXT:    vmov.f32 s18, s14
+; CHECK-LV-NEXT:    vins.f16 s21, s13
+; CHECK-LV-NEXT:    vmov.f32 s19, s9
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vadd.i16 q1, q4, q1
+; CHECK-LV-NEXT:    vadd.i16 q1, q1, q5
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v16i16:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s2
+; CHECK-LIS-NEXT:    vmov.f32 s4, s1
+; CHECK-LIS-NEXT:    vins.f16 s4, s6
+; CHECK-LIS-NEXT:    vmovx.f16 s6, s9
+; CHECK-LIS-NEXT:    vmov.f32 s5, s8
+; CHECK-LIS-NEXT:    vmovx.f16 s7, s12
+; CHECK-LIS-NEXT:    vins.f16 s5, s6
+; CHECK-LIS-NEXT:    vmov.f32 s6, s11
+; CHECK-LIS-NEXT:    vins.f16 s6, s7
+; CHECK-LIS-NEXT:    vmovx.f16 s16, s15
+; CHECK-LIS-NEXT:    vmov.f32 s7, s14
+; CHECK-LIS-NEXT:    vmovx.f16 s17, s3
+; CHECK-LIS-NEXT:    vins.f16 s7, s16
+; CHECK-LIS-NEXT:    vmovx.f16 s16, s0
+; CHECK-LIS-NEXT:    vins.f16 s16, s2
+; CHECK-LIS-NEXT:    vmovx.f16 s2, s1
+; CHECK-LIS-NEXT:    vins.f16 s0, s2
+; CHECK-LIS-NEXT:    vmovx.f16 s2, s8
+; CHECK-LIS-NEXT:    vins.f16 s3, s2
+; CHECK-LIS-NEXT:    vmovx.f16 s2, s11
+; CHECK-LIS-NEXT:    vmovx.f16 s8, s14
+; CHECK-LIS-NEXT:    vmovx.f16 s18, s10
+; CHECK-LIS-NEXT:    vmovx.f16 s19, s13
+; CHECK-LIS-NEXT:    vins.f16 s10, s2
+; CHECK-LIS-NEXT:    vins.f16 s13, s8
+; CHECK-LIS-NEXT:    vmov.f32 s1, s3
+; CHECK-LIS-NEXT:    vins.f16 s18, s12
+; CHECK-LIS-NEXT:    vins.f16 s19, s15
+; CHECK-LIS-NEXT:    vmov.f32 s3, s13
+; CHECK-LIS-NEXT:    vins.f16 s17, s9
+; CHECK-LIS-NEXT:    vmov.f32 s2, s10
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #16]
+; CHECK-LIS-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-LIS-NEXT:    vadd.i16 q0, q0, q1
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #32]
+; CHECK-LIS-NEXT:    vmovx.f16 s10, s14
+; CHECK-LIS-NEXT:    vmov.f32 s22, s15
+; CHECK-LIS-NEXT:    vins.f16 s10, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s4
+; CHECK-LIS-NEXT:    vins.f16 s22, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s7
+; CHECK-LIS-NEXT:    vmov.f32 s23, s6
+; CHECK-LIS-NEXT:    vmovx.f16 s8, s16
+; CHECK-LIS-NEXT:    vins.f16 s23, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s17
+; CHECK-LIS-NEXT:    vins.f16 s16, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s12
+; CHECK-LIS-NEXT:    vmovx.f16 s9, s19
+; CHECK-LIS-NEXT:    vins.f16 s19, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s15
+; CHECK-LIS-NEXT:    vmovx.f16 s11, s5
+; CHECK-LIS-NEXT:    vins.f16 s14, s4
+; CHECK-LIS-NEXT:    vmovx.f16 s4, s6
+; CHECK-LIS-NEXT:    vins.f16 s8, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s17
+; CHECK-LIS-NEXT:    vmovx.f16 s18, s18
+; CHECK-LIS-NEXT:    vins.f16 s5, s4
+; CHECK-LIS-NEXT:    vins.f16 s9, s13
+; CHECK-LIS-NEXT:    vins.f16 s20, s18
+; CHECK-LIS-NEXT:    vmov.f32 s17, s19
+; CHECK-LIS-NEXT:    vins.f16 s11, s7
+; CHECK-LIS-NEXT:    vmovx.f16 s13, s13
+; CHECK-LIS-NEXT:    vmov.f32 s21, s12
+; CHECK-LIS-NEXT:    vmov.f32 s18, s14
+; CHECK-LIS-NEXT:    vins.f16 s21, s13
+; CHECK-LIS-NEXT:    vmov.f32 s19, s5
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vadd.i16 q1, q4, q2
+; CHECK-LIS-NEXT:    vadd.i16 q1, q1, q5
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <48 x i16>, ptr %src, align 4
   %s1 = shufflevector <48 x i16> %l1, <48 x i16> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
@@ -710,35 +935,65 @@ entry:
 ; i64
 
 define void @vld3_v2i64(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v2i64:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s12, s2
-; CHECK-NEXT:    vmov.f32 s13, s3
-; CHECK-NEXT:    vmov.f32 s2, s4
-; CHECK-NEXT:    vmov.f32 s3, s5
-; CHECK-NEXT:    vmov r0, r3, d5
-; CHECK-NEXT:    vmov r2, r4, d3
-; CHECK-NEXT:    vmov r6, r7, d0
-; CHECK-NEXT:    vmov r5, r8, d6
-; CHECK-NEXT:    vmov lr, r12, d1
-; CHECK-NEXT:    adds.w r0, r0, lr
-; CHECK-NEXT:    adc.w r3, r3, r12
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adc.w r2, r3, r4
-; CHECK-NEXT:    vmov r3, r4, d4
-; CHECK-NEXT:    adds r6, r6, r5
-; CHECK-NEXT:    adc.w r7, r7, r8
-; CHECK-NEXT:    adds r3, r3, r6
-; CHECK-NEXT:    adcs r7, r4
-; CHECK-NEXT:    vmov q0[2], q0[0], r3, r0
-; CHECK-NEXT:    vmov q0[3], q0[1], r7, r2
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-LV-LABEL: vld3_v2i64:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #32]
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s12, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s3
+; CHECK-LV-NEXT:    vmov.f32 s2, s4
+; CHECK-LV-NEXT:    vmov.f32 s3, s5
+; CHECK-LV-NEXT:    vmov r0, r3, d5
+; CHECK-LV-NEXT:    vmov r2, r4, d3
+; CHECK-LV-NEXT:    vmov r6, r7, d0
+; CHECK-LV-NEXT:    vmov r5, r8, d6
+; CHECK-LV-NEXT:    vmov lr, r12, d1
+; CHECK-LV-NEXT:    adds.w r0, r0, lr
+; CHECK-LV-NEXT:    adc.w r3, r3, r12
+; CHECK-LV-NEXT:    adds r0, r0, r2
+; CHECK-LV-NEXT:    adc.w r2, r3, r4
+; CHECK-LV-NEXT:    vmov r3, r4, d4
+; CHECK-LV-NEXT:    adds r6, r6, r5
+; CHECK-LV-NEXT:    adc.w r7, r7, r8
+; CHECK-LV-NEXT:    adds r3, r3, r6
+; CHECK-LV-NEXT:    adcs r7, r4
+; CHECK-LV-NEXT:    vmov q0[2], q0[0], r3, r0
+; CHECK-LV-NEXT:    vmov q0[3], q0[1], r7, r2
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LV-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+;
+; CHECK-LIS-LABEL: vld3_v2i64:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s12, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s8
+; CHECK-LIS-NEXT:    vmov.f32 s3, s9
+; CHECK-LIS-NEXT:    vmov r0, r3, d3
+; CHECK-LIS-NEXT:    vmov r2, r4, d5
+; CHECK-LIS-NEXT:    vmov r6, r7, d0
+; CHECK-LIS-NEXT:    vmov r5, r8, d6
+; CHECK-LIS-NEXT:    vmov lr, r12, d1
+; CHECK-LIS-NEXT:    adds.w r0, r0, lr
+; CHECK-LIS-NEXT:    adc.w r3, r3, r12
+; CHECK-LIS-NEXT:    adds r0, r0, r2
+; CHECK-LIS-NEXT:    adc.w r2, r3, r4
+; CHECK-LIS-NEXT:    vmov r3, r4, d2
+; CHECK-LIS-NEXT:    adds r6, r6, r5
+; CHECK-LIS-NEXT:    adc.w r7, r7, r8
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r4
+; CHECK-LIS-NEXT:    vmov q0[2], q0[0], r3, r0
+; CHECK-LIS-NEXT:    vmov q0[3], q0[1], r7, r2
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LIS-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
   %l1 = load <6 x i64>, ptr %src, align 4
   %s1 = shufflevector <6 x i64> %l1, <6 x i64> undef, <2 x i32> <i32 0, i32 3>
@@ -751,62 +1006,120 @@ entry:
 }
 
 define void @vld3_v4i64(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v4i64:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #48]
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #64]
-; CHECK-NEXT:    vmov.f32 s5, s3
-; CHECK-NEXT:    vmov.f32 s2, s12
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vmov r5, r4, d5
-; CHECK-NEXT:    vmov r3, r8, d7
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s24, s22
-; CHECK-NEXT:    vmov.f32 s25, s23
-; CHECK-NEXT:    vmov lr, r12, d1
-; CHECK-NEXT:    vmov.f32 s2, s12
-; CHECK-NEXT:    vmov.f32 s3, s13
-; CHECK-NEXT:    vmov r6, r7, d12
-; CHECK-NEXT:    adds.w r0, r5, lr
-; CHECK-NEXT:    adc.w r5, r4, r12
-; CHECK-NEXT:    adds.w lr, r0, r3
-; CHECK-NEXT:    vmov r4, r2, d10
-; CHECK-NEXT:    adc.w r12, r5, r8
-; CHECK-NEXT:    vmov r5, r0, d8
-; CHECK-NEXT:    adds r6, r6, r4
-; CHECK-NEXT:    adcs r2, r7
-; CHECK-NEXT:    adds r6, r6, r5
-; CHECK-NEXT:    adc.w r8, r2, r0
-; CHECK-NEXT:    vmov r7, r4, d1
-; CHECK-NEXT:    vmov r2, r5, d9
-; CHECK-NEXT:    vmov r3, r0, d0
-; CHECK-NEXT:    adds r2, r2, r7
-; CHECK-NEXT:    adc.w r7, r5, r4
-; CHECK-NEXT:    vmov r5, r4, d7
-; CHECK-NEXT:    adds r2, r2, r5
-; CHECK-NEXT:    adcs r7, r4
-; CHECK-NEXT:    vmov r5, r4, d2
-; CHECK-NEXT:    vmov q1[2], q1[0], r6, r2
-; CHECK-NEXT:    vmov q1[3], q1[1], r8, r7
-; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
-; CHECK-NEXT:    adds r3, r3, r5
-; CHECK-NEXT:    adcs r0, r4
-; CHECK-NEXT:    vmov r4, r5, d4
-; CHECK-NEXT:    adds r3, r3, r4
-; CHECK-NEXT:    vmov q0[2], q0[0], r3, lr
-; CHECK-NEXT:    adcs r0, r5
-; CHECK-NEXT:    vmov q0[3], q0[1], r0, r12
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
-; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-LV-LABEL: vld3_v4i64:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11, d12}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11, d12}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-LV-NEXT:    vldrw.u32 q5, [r0, #48]
+; CHECK-LV-NEXT:    vmov.f32 s4, s2
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #64]
+; CHECK-LV-NEXT:    vmov.f32 s5, s3
+; CHECK-LV-NEXT:    vmov.f32 s2, s12
+; CHECK-LV-NEXT:    vmov.f32 s3, s13
+; CHECK-LV-NEXT:    vmov r5, r4, d5
+; CHECK-LV-NEXT:    vmov r3, r8, d7
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LV-NEXT:    vmov.f32 s24, s22
+; CHECK-LV-NEXT:    vmov.f32 s25, s23
+; CHECK-LV-NEXT:    vmov lr, r12, d1
+; CHECK-LV-NEXT:    vmov.f32 s2, s12
+; CHECK-LV-NEXT:    vmov.f32 s3, s13
+; CHECK-LV-NEXT:    vmov r6, r7, d12
+; CHECK-LV-NEXT:    adds.w r0, r5, lr
+; CHECK-LV-NEXT:    adc.w r5, r4, r12
+; CHECK-LV-NEXT:    adds.w lr, r0, r3
+; CHECK-LV-NEXT:    vmov r4, r2, d10
+; CHECK-LV-NEXT:    adc.w r12, r5, r8
+; CHECK-LV-NEXT:    vmov r5, r0, d8
+; CHECK-LV-NEXT:    adds r6, r6, r4
+; CHECK-LV-NEXT:    adcs r2, r7
+; CHECK-LV-NEXT:    adds r6, r6, r5
+; CHECK-LV-NEXT:    adc.w r8, r2, r0
+; CHECK-LV-NEXT:    vmov r7, r4, d1
+; CHECK-LV-NEXT:    vmov r2, r5, d9
+; CHECK-LV-NEXT:    vmov r3, r0, d0
+; CHECK-LV-NEXT:    adds r2, r2, r7
+; CHECK-LV-NEXT:    adc.w r7, r5, r4
+; CHECK-LV-NEXT:    vmov r5, r4, d7
+; CHECK-LV-NEXT:    adds r2, r2, r5
+; CHECK-LV-NEXT:    adcs r7, r4
+; CHECK-LV-NEXT:    vmov r5, r4, d2
+; CHECK-LV-NEXT:    vmov q1[2], q1[0], r6, r2
+; CHECK-LV-NEXT:    vmov q1[3], q1[1], r8, r7
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-LV-NEXT:    adds r3, r3, r5
+; CHECK-LV-NEXT:    adcs r0, r4
+; CHECK-LV-NEXT:    vmov r4, r5, d4
+; CHECK-LV-NEXT:    adds r3, r3, r4
+; CHECK-LV-NEXT:    vmov q0[2], q0[0], r3, lr
+; CHECK-LV-NEXT:    adcs r0, r5
+; CHECK-LV-NEXT:    vmov q0[3], q0[1], r0, r12
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11, d12}
+; CHECK-LV-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+;
+; CHECK-LIS-LABEL: vld3_v4i64:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11, d12}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11, d12}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vldrw.u32 q5, [r0, #48]
+; CHECK-LIS-NEXT:    vmov.f32 s8, s2
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #64]
+; CHECK-LIS-NEXT:    vmov.f32 s9, s3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s12
+; CHECK-LIS-NEXT:    vmov.f32 s3, s13
+; CHECK-LIS-NEXT:    vmov r2, r3, d3
+; CHECK-LIS-NEXT:    vmov r4, r8, d7
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-LIS-NEXT:    vmov.f32 s24, s22
+; CHECK-LIS-NEXT:    vmov.f32 s25, s23
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vmov lr, r12, d1
+; CHECK-LIS-NEXT:    vmov.f32 s2, s12
+; CHECK-LIS-NEXT:    vmov.f32 s3, s13
+; CHECK-LIS-NEXT:    vmov r6, r7, d12
+; CHECK-LIS-NEXT:    adds.w r0, r2, lr
+; CHECK-LIS-NEXT:    adc.w r2, r3, r12
+; CHECK-LIS-NEXT:    adds.w lr, r0, r4
+; CHECK-LIS-NEXT:    vmov r3, r5, d10
+; CHECK-LIS-NEXT:    adc.w r12, r2, r8
+; CHECK-LIS-NEXT:    vmov r2, r0, d8
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r5
+; CHECK-LIS-NEXT:    adds r2, r2, r3
+; CHECK-LIS-NEXT:    adc.w r8, r7, r0
+; CHECK-LIS-NEXT:    vmov r6, r5, d1
+; CHECK-LIS-NEXT:    vmov r3, r7, d3
+; CHECK-LIS-NEXT:    vmov r4, r0, d0
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r5
+; CHECK-LIS-NEXT:    vmov r6, r5, d7
+; CHECK-LIS-NEXT:    adds r3, r3, r6
+; CHECK-LIS-NEXT:    adcs r7, r5
+; CHECK-LIS-NEXT:    vmov r6, r5, d4
+; CHECK-LIS-NEXT:    adds r4, r4, r6
+; CHECK-LIS-NEXT:    adcs r0, r5
+; CHECK-LIS-NEXT:    vmov r5, r6, d2
+; CHECK-LIS-NEXT:    vmov q1[2], q1[0], r2, r3
+; CHECK-LIS-NEXT:    vmov q1[3], q1[1], r8, r7
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-LIS-NEXT:    adds r4, r4, r5
+; CHECK-LIS-NEXT:    vmov q0[2], q0[0], r4, lr
+; CHECK-LIS-NEXT:    adcs r0, r6
+; CHECK-LIS-NEXT:    vmov q0[3], q0[1], r0, r12
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11, d12}
+; CHECK-LIS-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
   %l1 = load <12 x i64>, ptr %src, align 4
   %s1 = shufflevector <12 x i64> %l1, <12 x i64> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
@@ -881,46 +1194,87 @@ entry:
 }
 
 define void @vld3_v8f32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v8f32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.f32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.f32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vadd.f32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vadd.f32 q1, q4, q1
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v8f32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vadd.f32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vadd.f32 q1, q4, q1
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v8f32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.f32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vadd.f32 q1, q3, q1
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <24 x float>, ptr %src, align 4
   %s1 = shufflevector <24 x float> %l1, <24 x float> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
@@ -933,80 +1287,155 @@ entry:
 }
 
 define void @vld3_v16f32(ptr %src, ptr %dst) {
-; CHECK-LABEL: vld3_v16f32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vldrw.u32 q6, [r0, #176]
-; CHECK-NEXT:    vmov.f32 s10, s2
-; CHECK-NEXT:    vmov.f32 s13, s0
-; CHECK-NEXT:    vmov.f32 s14, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s12, s5
-; CHECK-NEXT:    vmov.f32 s15, s18
-; CHECK-NEXT:    vmov.f32 s11, s17
-; CHECK-NEXT:    vadd.f32 q2, q2, q3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s16
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
-; CHECK-NEXT:    vadd.f32 q0, q2, q0
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s16, s9
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vmov.f32 s20, s8
-; CHECK-NEXT:    vmov.f32 s21, s11
-; CHECK-NEXT:    vmov.f32 s23, s13
-; CHECK-NEXT:    vmov.f32 s4, s10
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #160]
-; CHECK-NEXT:    vmov.f32 s6, s12
-; CHECK-NEXT:    vadd.f32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s7, s15
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
-; CHECK-NEXT:    vadd.f32 q1, q4, q1
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vmov.f32 s21, s8
-; CHECK-NEXT:    vmov.f32 s22, s11
-; CHECK-NEXT:    vmov.f32 s16, s12
-; CHECK-NEXT:    vmov.f32 s17, s15
-; CHECK-NEXT:    vmov.f32 s20, s13
-; CHECK-NEXT:    vmov.f32 s23, s26
-; CHECK-NEXT:    vmov.f32 s19, s25
-; CHECK-NEXT:    vadd.f32 q4, q4, q5
-; CHECK-NEXT:    vmov.f32 s8, s14
-; CHECK-NEXT:    vmov.f32 s10, s24
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #112]
-; CHECK-NEXT:    vmov.f32 s11, s27
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #128]
-; CHECK-NEXT:    vadd.f32 q2, q4, q2
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #96]
-; CHECK-NEXT:    vmov.f32 s25, s12
-; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s26, s15
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s30, s14
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vmov.f32 s24, s17
-; CHECK-NEXT:    vmov.f32 s27, s22
-; CHECK-NEXT:    vmov.f32 s28, s16
-; CHECK-NEXT:    vmov.f32 s29, s19
-; CHECK-NEXT:    vmov.f32 s31, s21
-; CHECK-NEXT:    vadd.f32 q6, q7, q6
-; CHECK-NEXT:    vmov.f32 s12, s18
-; CHECK-NEXT:    vmov.f32 s14, s20
-; CHECK-NEXT:    vmov.f32 s15, s23
-; CHECK-NEXT:    vadd.f32 q3, q6, q3
-; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    bx lr
+; CHECK-LV-LABEL: vld3_v16f32:
+; CHECK-LV:       @ %bb.0: @ %entry
+; CHECK-LV-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LV-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LV-NEXT:    vmov.f32 s10, s2
+; CHECK-LV-NEXT:    vmov.f32 s13, s0
+; CHECK-LV-NEXT:    vmov.f32 s14, s3
+; CHECK-LV-NEXT:    vmov.f32 s8, s4
+; CHECK-LV-NEXT:    vmov.f32 s9, s7
+; CHECK-LV-NEXT:    vmov.f32 s12, s5
+; CHECK-LV-NEXT:    vmov.f32 s15, s18
+; CHECK-LV-NEXT:    vmov.f32 s11, s17
+; CHECK-LV-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LV-NEXT:    vmov.f32 s0, s6
+; CHECK-LV-NEXT:    vmov.f32 s2, s16
+; CHECK-LV-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LV-NEXT:    vmov.f32 s3, s19
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-LV-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LV-NEXT:    vmov.f32 s17, s4
+; CHECK-LV-NEXT:    vmov.f32 s18, s7
+; CHECK-LV-NEXT:    vmov.f32 s22, s6
+; CHECK-LV-NEXT:    vmov.f32 s16, s9
+; CHECK-LV-NEXT:    vmov.f32 s19, s14
+; CHECK-LV-NEXT:    vmov.f32 s20, s8
+; CHECK-LV-NEXT:    vmov.f32 s21, s11
+; CHECK-LV-NEXT:    vmov.f32 s23, s13
+; CHECK-LV-NEXT:    vmov.f32 s4, s10
+; CHECK-LV-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LV-NEXT:    vmov.f32 s6, s12
+; CHECK-LV-NEXT:    vadd.f32 q4, q5, q4
+; CHECK-LV-NEXT:    vmov.f32 s7, s15
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LV-NEXT:    vadd.f32 q1, q4, q1
+; CHECK-LV-NEXT:    vmov.f32 s18, s10
+; CHECK-LV-NEXT:    vmov.f32 s21, s8
+; CHECK-LV-NEXT:    vmov.f32 s22, s11
+; CHECK-LV-NEXT:    vmov.f32 s16, s12
+; CHECK-LV-NEXT:    vmov.f32 s17, s15
+; CHECK-LV-NEXT:    vmov.f32 s20, s13
+; CHECK-LV-NEXT:    vmov.f32 s23, s26
+; CHECK-LV-NEXT:    vmov.f32 s19, s25
+; CHECK-LV-NEXT:    vadd.f32 q4, q4, q5
+; CHECK-LV-NEXT:    vmov.f32 s8, s14
+; CHECK-LV-NEXT:    vmov.f32 s10, s24
+; CHECK-LV-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LV-NEXT:    vmov.f32 s11, s27
+; CHECK-LV-NEXT:    vldrw.u32 q5, [r0, #128]
+; CHECK-LV-NEXT:    vadd.f32 q2, q4, q2
+; CHECK-LV-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LV-NEXT:    vmov.f32 s25, s12
+; CHECK-LV-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LV-NEXT:    vmov.f32 s26, s15
+; CHECK-LV-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LV-NEXT:    vmov.f32 s30, s14
+; CHECK-LV-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LV-NEXT:    vmov.f32 s24, s17
+; CHECK-LV-NEXT:    vmov.f32 s27, s22
+; CHECK-LV-NEXT:    vmov.f32 s28, s16
+; CHECK-LV-NEXT:    vmov.f32 s29, s19
+; CHECK-LV-NEXT:    vmov.f32 s31, s21
+; CHECK-LV-NEXT:    vadd.f32 q6, q7, q6
+; CHECK-LV-NEXT:    vmov.f32 s12, s18
+; CHECK-LV-NEXT:    vmov.f32 s14, s20
+; CHECK-LV-NEXT:    vmov.f32 s15, s23
+; CHECK-LV-NEXT:    vadd.f32 q3, q6, q3
+; CHECK-LV-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LV-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LV-NEXT:    bx lr
+;
+; CHECK-LIS-LABEL: vld3_v16f32:
+; CHECK-LIS:       @ %bb.0: @ %entry
+; CHECK-LIS-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-LIS-NEXT:    vmov.f32 s10, s2
+; CHECK-LIS-NEXT:    vmov.f32 s13, s0
+; CHECK-LIS-NEXT:    vmov.f32 s14, s3
+; CHECK-LIS-NEXT:    vmov.f32 s8, s4
+; CHECK-LIS-NEXT:    vmov.f32 s9, s7
+; CHECK-LIS-NEXT:    vmov.f32 s12, s5
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s11, s17
+; CHECK-LIS-NEXT:    vmov.f32 s0, s6
+; CHECK-LIS-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-LIS-NEXT:    vmov.f32 s2, s16
+; CHECK-LIS-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s3, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-LIS-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-LIS-NEXT:    vmov.f32 s13, s4
+; CHECK-LIS-NEXT:    vmov.f32 s14, s7
+; CHECK-LIS-NEXT:    vmov.f32 s22, s6
+; CHECK-LIS-NEXT:    vmov.f32 s12, s9
+; CHECK-LIS-NEXT:    vmov.f32 s15, s18
+; CHECK-LIS-NEXT:    vmov.f32 s20, s8
+; CHECK-LIS-NEXT:    vmov.f32 s21, s11
+; CHECK-LIS-NEXT:    vmov.f32 s23, s17
+; CHECK-LIS-NEXT:    vadd.f32 q3, q5, q3
+; CHECK-LIS-NEXT:    vmov.f32 s4, s10
+; CHECK-LIS-NEXT:    vmov.f32 s7, s19
+; CHECK-LIS-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-LIS-NEXT:    vmov.f32 s6, s16
+; CHECK-LIS-NEXT:    vadd.f32 q1, q3, q1
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #144]
+; CHECK-LIS-NEXT:    vmov.f32 s18, s10
+; CHECK-LIS-NEXT:    vmov.f32 s21, s8
+; CHECK-LIS-NEXT:    vmov.f32 s22, s11
+; CHECK-LIS-NEXT:    vmov.f32 s16, s12
+; CHECK-LIS-NEXT:    vmov.f32 s17, s15
+; CHECK-LIS-NEXT:    vmov.f32 s20, s13
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s19, s25
+; CHECK-LIS-NEXT:    vmov.f32 s8, s14
+; CHECK-LIS-NEXT:    vadd.f32 q4, q4, q5
+; CHECK-LIS-NEXT:    vmov.f32 s10, s24
+; CHECK-LIS-NEXT:    vldrw.u32 q3, [r0, #112]
+; CHECK-LIS-NEXT:    vmov.f32 s11, s27
+; CHECK-LIS-NEXT:    vldrw.u32 q6, [r0, #128]
+; CHECK-LIS-NEXT:    vadd.f32 q2, q4, q2
+; CHECK-LIS-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-LIS-NEXT:    vmov.f32 s21, s12
+; CHECK-LIS-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-LIS-NEXT:    vmov.f32 s22, s15
+; CHECK-LIS-NEXT:    vstrw.32 q0, [r1, #16]
+; CHECK-LIS-NEXT:    vmov.f32 s30, s14
+; CHECK-LIS-NEXT:    vstrw.32 q1, [r1]
+; CHECK-LIS-NEXT:    vmov.f32 s20, s17
+; CHECK-LIS-NEXT:    vmov.f32 s23, s26
+; CHECK-LIS-NEXT:    vmov.f32 s28, s16
+; CHECK-LIS-NEXT:    vmov.f32 s29, s19
+; CHECK-LIS-NEXT:    vmov.f32 s31, s25
+; CHECK-LIS-NEXT:    vadd.f32 q5, q7, q5
+; CHECK-LIS-NEXT:    vmov.f32 s12, s18
+; CHECK-LIS-NEXT:    vmov.f32 s14, s24
+; CHECK-LIS-NEXT:    vmov.f32 s15, s27
+; CHECK-LIS-NEXT:    vadd.f32 q3, q5, q3
+; CHECK-LIS-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-LIS-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-LIS-NEXT:    bx lr
 entry:
   %l1 = load <48 x float>, ptr %src, align 4
   %s1 = shufflevector <48 x float> %l1, <48 x float> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>

>From 5621deddf6b2b30e5e8427186c16dd12c73ca484 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 11 Sep 2023 19:51:13 +0100
Subject: [PATCH 2/2] [RISCV] Shrink vslideup's LMUL when lowering fixed
 insert_subvector

Similar to #65598, if we're using a vslideup to insert a fixed-length
vector into another vector, then given the minimum VLEN we can work out
the minimum number of registers the slideup needs to cross, and shrink
the type being operated on to reduce LMUL accordingly.
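
To make that concrete, here is a minimal sketch of the sizing rule in
isolation (the function name and the plain power-of-two scan are
illustrative assumptions, not the in-tree getSmallestVTForIndex helper):

  // Smallest LMUL whose register group is still guaranteed to contain
  // element LastIdx, given SEW (element bits) and the minimum VLEN.
  unsigned smallestLMULForIndex(unsigned LastIdx, unsigned SEW,
                                unsigned MinVLen) {
    unsigned ElemsPerReg = MinVLen / SEW; // elements guaranteed per vreg
    for (unsigned LMUL = 1; LMUL <= 8; LMUL *= 2)
      if (LastIdx < ElemsPerReg * LMUL)
        return LMUL; // the slideup only needs to write this group
    return 0; // not provably smaller; keep the original type
  }

For instance, inserting a <2 x i32> at index 2 gives LastIdx = 3; with
Zvl128b a single register is guaranteed to hold four i32 elements, so the
slideup can run at m1 instead of m4, as in insert_nxv8i32_v2i32_2 below.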

This somewhat depends on #65916, since this patch introduces a
subregister copy that triggers a crash with -early-live-intervals in one
of the tests.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  18 +
 .../rvv/fixed-vectors-insert-subvector.ll     |  45 ++-
 .../rvv/fixed-vectors-strided-load-combine.ll |  80 ++---
 .../CodeGen/RISCV/rvv/fpclamptosat_vec.ll     | 330 ++++++++----------
 4 files changed, 229 insertions(+), 244 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0dd03076cc05b36..71570f7f9d700f9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8606,6 +8606,18 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
       ContainerVT = getContainerForFixedLengthVector(VecVT);
       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
     }
+
+    // Shrink down Vec so we're performing the slideup on a smaller LMUL.
+    unsigned LastIdx = OrigIdx + SubVecVT.getVectorNumElements() - 1;
+    MVT OrigContainerVT = ContainerVT;
+    SDValue OrigVec = Vec;
+    if (auto ShrunkVT =
+            getSmallestVTForIndex(ContainerVT, LastIdx, DL, DAG, Subtarget)) {
+      ContainerVT = *ShrunkVT;
+      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerVT, Vec,
+                        DAG.getVectorIdxConstant(0, DL));
+    }
+
     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
                          DAG.getUNDEF(ContainerVT), SubVec,
                          DAG.getConstant(0, DL, XLenVT));
@@ -8636,6 +8648,12 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
                            SlideupAmt, Mask, VL, Policy);
     }
 
+    // If we performed the slideup on a smaller LMUL, insert the result back
+    // into the rest of the vector.
+    if (ContainerVT != OrigContainerVT)
+      SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
+                           SubVec, DAG.getVectorIdxConstant(0, DL));
+
     if (VecVT.isFixedLengthVector())
       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
     return DAG.getBitcast(Op.getValueType(), SubVec);
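
As a quick sanity check on the arithmetic, here is a hypothetical driver
for the sketch above (my numbers, not tool output), using the
insert_v2i64_nxv16i64 test below, which slides a <2 x i64> up to index 4,
so LastIdx = 5:

  #include <cstdio>

  unsigned smallestLMULForIndex(unsigned LastIdx, unsigned SEW,
                                unsigned MinVLen); // sketch above

  int main() {
    // Zvl128b guarantees two e64 elements per register, so covering
    // element 5 needs a four-register group.
    unsigned LMUL = smallestLMULForIndex(/*LastIdx=*/5, /*SEW=*/64,
                                         /*MinVLen=*/128);
    std::printf("m%u\n", LMUL); // prints "m4": the vslideup drops m8 -> m4
    return 0;
  }
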
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 1d6a45ed36f335c..6a9212ed309a8ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -14,7 +14,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, ptr %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, ptr %svp
@@ -27,7 +27,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_2(<vscale x 8 x i32> %vec, ptr %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 2
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, ptr %svp
@@ -40,7 +40,7 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, ptr %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle32.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 6
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, ptr %svp
@@ -51,22 +51,19 @@ define <vscale x 8 x i32> @insert_nxv8i32_v2i32_6(<vscale x 8 x i32> %vec, ptr %
 define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %svp) {
 ; LMULMAX2-LABEL: insert_nxv8i32_v8i32_0:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT:    vle32.v v12, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX2-NEXT:    vmv.v.v v8, v12
+; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; LMULMAX2-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_0:
 ; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vle32.v v12, (a0)
-; LMULMAX1-NEXT:    addi a0, a0, 16
-; LMULMAX1-NEXT:    vle32.v v16, (a0)
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vmv.v.v v8, v12
-; LMULMAX1-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v16, 4
+; LMULMAX1-NEXT:    vle32.v v12, (a1)
+; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
+; LMULMAX1-NEXT:    vle32.v v8, (a0)
+; LMULMAX1-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
+; LMULMAX1-NEXT:    vslideup.vi v8, v12, 4
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, ptr %svp
   %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
@@ -84,14 +81,14 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, ptr %
 ;
 ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_8:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vle32.v v12, (a1)
+; LMULMAX1-NEXT:    vle32.v v12, (a0)
+; LMULMAX1-NEXT:    addi a0, a0, 16
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 12, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v16, 8
+; LMULMAX1-NEXT:    vslideup.vi v8, v12, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v12, 12
+; LMULMAX1-NEXT:    vslideup.vi v8, v16, 12
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, ptr %svp
   %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)
@@ -166,7 +163,7 @@ define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
 ; LMULMAX2-NEXT:    vle32.v v8, (a1)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vle32.v v10, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; LMULMAX2-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; LMULMAX2-NEXT:    vmv.v.v v10, v8
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
@@ -197,7 +194,7 @@ define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
 ; LMULMAX2-NEXT:    vle32.v v8, (a1)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vle32.v v10, (a0)
-; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; LMULMAX2-NEXT:    vslideup.vi v10, v8, 2
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
@@ -508,9 +505,9 @@ define void @insert_v2i64_nxv16i64(ptr %psv0, ptr %psv1, <vscale x 16 x i64>* %o
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vle64.v v16, (a1)
-; CHECK-NEXT:    vsetivli zero, 6, e64, m8, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v16, 4
+; CHECK-NEXT:    vle64.v v12, (a1)
+; CHECK-NEXT:    vsetivli zero, 6, e64, m4, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v12, 4
 ; CHECK-NEXT:    vs8r.v v8, (a2)
 ; CHECK-NEXT:    ret
   %sv0 = load <2 x i64>, ptr %psv0
@@ -539,7 +536,7 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, <vscale x 16 x i64>* %out) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetivli zero, 4, e64, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v16, v8, 2
 ; CHECK-NEXT:    vs8r.v v16, (a1)
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index f52ba6f51d5c897..805557905117add 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -27,13 +27,13 @@ define void @widen_3xv4i16(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a0, 8
-; CHECK-NEXT:    vle16.v v10, (a2)
+; CHECK-NEXT:    vle16.v v9, (a2)
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vle16.v v12, (a0)
-; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 8
+; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    vse16.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
@@ -75,17 +75,17 @@ define void @widen_4xv4i16_unaligned(ptr %x, ptr %z) {
 ; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NO-MISALIGN-NEXT:    vle8.v v8, (a0)
 ; CHECK-NO-MISALIGN-NEXT:    addi a2, a0, 8
-; CHECK-NO-MISALIGN-NEXT:    vle8.v v10, (a2)
+; CHECK-NO-MISALIGN-NEXT:    vle8.v v9, (a2)
 ; CHECK-NO-MISALIGN-NEXT:    addi a2, a0, 16
-; CHECK-NO-MISALIGN-NEXT:    vle8.v v12, (a2)
+; CHECK-NO-MISALIGN-NEXT:    vle8.v v10, (a2)
 ; CHECK-NO-MISALIGN-NEXT:    addi a0, a0, 24
-; CHECK-NO-MISALIGN-NEXT:    vle8.v v14, (a0)
-; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NO-MISALIGN-NEXT:    vle8.v v12, (a0)
+; CHECK-NO-MISALIGN-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
+; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v12, 8
+; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NO-MISALIGN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v14, 12
+; CHECK-NO-MISALIGN-NEXT:    vslideup.vi v8, v12, 12
 ; CHECK-NO-MISALIGN-NEXT:    vse16.v v8, (a1)
 ; CHECK-NO-MISALIGN-NEXT:    ret
 ;
@@ -188,17 +188,17 @@ define void @strided_constant_mismatch_4xv4i16(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a0, 2
-; CHECK-NEXT:    vle16.v v10, (a2)
+; CHECK-NEXT:    vle16.v v9, (a2)
 ; CHECK-NEXT:    addi a2, a0, 6
-; CHECK-NEXT:    vle16.v v12, (a2)
+; CHECK-NEXT:    vle16.v v10, (a2)
 ; CHECK-NEXT:    addi a0, a0, 8
-; CHECK-NEXT:    vle16.v v14, (a0)
-; CHECK-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 4
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 8
+; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vslideup.vi v8, v14, 12
+; CHECK-NEXT:    vslideup.vi v8, v12, 12
 ; CHECK-NEXT:    vse16.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
@@ -258,17 +258,17 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    vle16.v v10, (a0)
+; RV32-NEXT:    vle16.v v9, (a0)
 ; RV32-NEXT:    add a0, a0, a4
-; RV32-NEXT:    vle16.v v12, (a0)
+; RV32-NEXT:    vle16.v v10, (a0)
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    vle16.v v14, (a0)
-; RV32-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 4
+; RV32-NEXT:    vle16.v v12, (a0)
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 4
 ; RV32-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v12, 8
+; RV32-NEXT:    vslideup.vi v8, v10, 8
 ; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v14, 12
+; RV32-NEXT:    vslideup.vi v8, v12, 12
 ; RV32-NEXT:    vse16.v v8, (a1)
 ; RV32-NEXT:    ret
 ;
@@ -277,17 +277,17 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
 ; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    add a0, a0, a2
-; RV64-NEXT:    vle16.v v10, (a0)
+; RV64-NEXT:    vle16.v v9, (a0)
 ; RV64-NEXT:    add a0, a0, a3
-; RV64-NEXT:    vle16.v v12, (a0)
+; RV64-NEXT:    vle16.v v10, (a0)
 ; RV64-NEXT:    add a0, a0, a2
-; RV64-NEXT:    vle16.v v14, (a0)
-; RV64-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v10, 4
+; RV64-NEXT:    vle16.v v12, (a0)
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; RV64-NEXT:    vslideup.vi v8, v9, 4
 ; RV64-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; RV64-NEXT:    vslideup.vi v8, v12, 8
+; RV64-NEXT:    vslideup.vi v8, v10, 8
 ; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT:    vslideup.vi v8, v14, 12
+; RV64-NEXT:    vslideup.vi v8, v12, 12
 ; RV64-NEXT:    vse16.v v8, (a1)
 ; RV64-NEXT:    ret
 ;
@@ -296,17 +296,17 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
 ; ZVE64F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVE64F-NEXT:    vle16.v v8, (a0)
 ; ZVE64F-NEXT:    add a0, a0, a2
-; ZVE64F-NEXT:    vle16.v v10, (a0)
+; ZVE64F-NEXT:    vle16.v v9, (a0)
 ; ZVE64F-NEXT:    add a0, a0, a3
-; ZVE64F-NEXT:    vle16.v v12, (a0)
+; ZVE64F-NEXT:    vle16.v v10, (a0)
 ; ZVE64F-NEXT:    add a0, a0, a2
-; ZVE64F-NEXT:    vle16.v v14, (a0)
-; ZVE64F-NEXT:    vsetivli zero, 8, e16, m2, tu, ma
-; ZVE64F-NEXT:    vslideup.vi v8, v10, 4
+; ZVE64F-NEXT:    vle16.v v12, (a0)
+; ZVE64F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
+; ZVE64F-NEXT:    vslideup.vi v8, v9, 4
 ; ZVE64F-NEXT:    vsetivli zero, 12, e16, m2, tu, ma
-; ZVE64F-NEXT:    vslideup.vi v8, v12, 8
+; ZVE64F-NEXT:    vslideup.vi v8, v10, 8
 ; ZVE64F-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; ZVE64F-NEXT:    vslideup.vi v8, v14, 12
+; ZVE64F-NEXT:    vslideup.vi v8, v12, 12
 ; ZVE64F-NEXT:    vse16.v v8, (a1)
 ; ZVE64F-NEXT:    ret
   %a = load <4 x i16>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index 31e7e7be76c89b1..f598118c18aff9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -460,54 +460,49 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    lui a0, 524288
@@ -632,54 +627,49 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -813,54 +803,49 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -1454,8 +1439,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -1466,16 +1451,16 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -1484,7 +1469,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -1737,8 +1722,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
@@ -1749,16 +1734,16 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -1767,7 +1752,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -2040,8 +2025,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -2052,16 +2037,16 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -2070,7 +2055,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -3796,54 +3781,49 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    lui a0, 524288
@@ -3966,54 +3946,49 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -4146,54 +4121,49 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sub sp, sp, a1
 ; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
 ; CHECK-V-NEXT:    lhu s0, 24(a0)
-; CHECK-V-NEXT:    lhu s1, 16(a0)
-; CHECK-V-NEXT:    lhu s2, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s1, 0(a0)
+; CHECK-V-NEXT:    lhu s2, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 16(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT:    fmv.w.x fa0, s2
-; CHECK-V-NEXT:    call __extendhfsf2@plt
-; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    addi a0, sp, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
 ; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    fmv.w.x fa0, s2
+; CHECK-V-NEXT:    call __extendhfsf2@plt
+; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-V-NEXT:    fmv.w.x fa0, s1
+; CHECK-V-NEXT:    vmv.s.x v8, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
+; CHECK-V-NEXT:    vmv.s.x v10, a0
+; CHECK-V-NEXT:    addi a0, sp, 16
+; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    csrr a0, vlenb
 ; CHECK-V-NEXT:    slli a0, a0, 1
 ; CHECK-V-NEXT:    add a0, sp, a0
 ; CHECK-V-NEXT:    addi a0, a0, 16
-; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
-; CHECK-V-NEXT:    csrr a0, vlenb
-; CHECK-V-NEXT:    slli a0, a0, 1
-; CHECK-V-NEXT:    add a0, sp, a0
-; CHECK-V-NEXT:    addi a0, a0, 16
+; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-V-NEXT:    li a0, -1
@@ -4775,8 +4745,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -4787,16 +4757,16 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -4805,7 +4775,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5054,8 +5024,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
@@ -5066,16 +5036,16 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5084,7 +5054,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5356,8 +5326,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    lhu s3, 32(a0)
 ; CHECK-V-NEXT:    lhu s4, 24(a0)
 ; CHECK-V-NEXT:    lhu s5, 16(a0)
-; CHECK-V-NEXT:    lhu s6, 0(a0)
-; CHECK-V-NEXT:    lhu a0, 8(a0)
+; CHECK-V-NEXT:    lhu s6, 8(a0)
+; CHECK-V-NEXT:    lhu a0, 0(a0)
 ; CHECK-V-NEXT:    fmv.w.x fa0, a0
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
@@ -5368,16 +5338,16 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s6
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-V-NEXT:    fmv.w.x fa0, s5
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
@@ -5386,7 +5356,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-V-NEXT:    fmv.w.x fa0, s4
 ; CHECK-V-NEXT:    call __extendhfsf2@plt
 ; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
 ; CHECK-V-NEXT:    vmv.s.x v8, a0
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload


