[llvm] 9823c39 - [RISCV][NFC] Use common prefix to simplify test.

Jianjian GUAN via llvm-commits <llvm-commits@lists.llvm.org>
Thu Jun 1 21:16:11 PDT 2023


Author: Jianjian GUAN
Date: 2023-06-02T12:16:03+08:00
New Revision: 9823c39afe5c96e68c0afb1e33ffa70c2d9dcc6c

URL: https://github.com/llvm/llvm-project/commit/9823c39afe5c96e68c0afb1e33ffa70c2d9dcc6c
DIFF: https://github.com/llvm/llvm-project/commit/9823c39afe5c96e68c0afb1e33ffa70c2d9dcc6c.diff

LOG: [RISCV][NFC] Use common prefix to simplify test.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D151871
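
Context on the mechanism, for readers unfamiliar with FileCheck prefixes:
FileCheck accepts several prefixes per invocation, so passing
--check-prefixes=CHECK,CHECK-RV32 lets update_llc_test_checks.py emit a
single shared CHECK block whenever riscv32 and riscv64 produce identical
code, while keeping CHECK-RV32/CHECK-RV64 available for any case where the
output diverges. A minimal sketch of the pattern (the function below and
the exact vsetivli operands are illustrative, not taken from this patch):

; RUN: llc -mtriple=riscv32 -mattr=+v < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV64

; Illustrative function, not part of the patch: both targets lower it to
; the same instructions, so one shared CHECK block replaces duplicated
; CHECK-RV32/CHECK-RV64 blocks.
define <2 x i8> @add_v2i8(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: add_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = add <2 x i8> %a, %b
  ret <2 x i8> %v
}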

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 5190c747d671c..1866183a2ac28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -1,25 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+v,+experimental-zvfh \
 ; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+v,+experimental-zvfh \
 ; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK-RV64
+; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
 
 declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i8(ptr, i8, <2 x i1>, i32)
 
 define <2 x i8> @strided_vpload_v2i8_i8(ptr %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2i8_i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2i8_i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2i8_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i8(ptr %ptr, i8 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %load
 }
@@ -27,17 +21,11 @@ define <2 x i8> @strided_vpload_v2i8_i8(ptr %ptr, i8 signext %stride, <2 x i1> %
 declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i16(ptr, i16, <2 x i1>, i32)
 
 define <2 x i8> @strided_vpload_v2i8_i16(ptr %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2i8_i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2i8_i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2i8_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i16(ptr %ptr, i16 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %load
 }
@@ -63,17 +51,11 @@ define <2 x i8> @strided_vpload_v2i8_i64(ptr %ptr, i64 signext %stride, <2 x i1>
 declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x i8> @strided_vpload_v2i8(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %load
 }
@@ -81,33 +63,21 @@ define <2 x i8> @strided_vpload_v2i8(ptr %ptr, i32 signext %stride, <2 x i1> %m,
 declare <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x i8> @strided_vpload_v4i8(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
-; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
-; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %load
 }
 
 define <4 x i8> @strided_vpload_v4i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4i8_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
-; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4i8_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
-; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4i8_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <4 x i1> poison, i1 true, i32 0
   %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
   %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
@@ -117,17 +87,11 @@ define <4 x i8> @strided_vpload_v4i8_allones_mask(ptr %ptr, i32 signext %stride,
 declare <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %load
 }
@@ -135,17 +99,11 @@ define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m,
 declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %load
 }
@@ -153,17 +111,11 @@ define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> %
 declare <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x i16> @strided_vpload_v4i16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %load
 }
@@ -171,33 +123,21 @@ define <4 x i16> @strided_vpload_v4i16(ptr %ptr, i32 signext %stride, <4 x i1> %
 declare <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x i16> @strided_vpload_v8i16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %load
 }
 
 define <8 x i16> @strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8i16_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8i16_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8i16_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> poison, i1 true, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
   %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
@@ -207,17 +147,11 @@ define <8 x i16> @strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %strid
 declare <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x i32> @strided_vpload_v2i32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2i32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2i32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %load
 }
@@ -225,17 +159,11 @@ define <2 x i32> @strided_vpload_v2i32(ptr %ptr, i32 signext %stride, <2 x i1> %
 declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4i32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4i32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %load
 }
@@ -243,33 +171,21 @@ define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %
 declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x i32> @strided_vpload_v8i32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8i32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8i32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %load
 }
 
 define <8 x i32> @strided_vpload_v8i32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8i32_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8i32_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8i32_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> poison, i1 true, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
   %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
@@ -279,17 +195,11 @@ define <8 x i32> @strided_vpload_v8i32_allones_mask(ptr %ptr, i32 signext %strid
 declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2i64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2i64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %load
 }
@@ -297,33 +207,21 @@ define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %
 declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x i64> @strided_vpload_v4i64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4i64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4i64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %load
 }
 
 define <4 x i64> @strided_vpload_v4i64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4i64_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4i64_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4i64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <4 x i1> poison, i1 true, i32 0
   %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
   %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
@@ -333,17 +231,11 @@ define <4 x i64> @strided_vpload_v4i64_allones_mask(ptr %ptr, i32 signext %strid
 declare <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x i64> @strided_vpload_v8i64(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8i64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8i64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %load
 }
@@ -351,33 +243,21 @@ define <8 x i64> @strided_vpload_v8i64(ptr %ptr, i32 signext %stride, <8 x i1> %
 declare <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x half> @strided_vpload_v2f16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2f16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2f16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x half> %load
 }
 
 define <2 x half> @strided_vpload_v2f16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2f16_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2f16_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2f16_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <2 x i1> poison, i1 true, i32 0
   %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
   %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %b, i32 %evl)
@@ -387,17 +267,11 @@ define <2 x half> @strided_vpload_v2f16_allones_mask(ptr %ptr, i32 signext %stri
 declare <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x half> @strided_vpload_v4f16(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4f16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4f16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret <4 x half> %load
 }
@@ -405,17 +279,11 @@ define <4 x half> @strided_vpload_v4f16(ptr %ptr, i32 signext %stride, <4 x i1>
 declare <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8f16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8f16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret <8 x half> %load
 }
@@ -423,17 +291,11 @@ define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1>
 declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2f32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2f32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x float> %load
 }
@@ -441,17 +303,11 @@ define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1>
 declare <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4f32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4f32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret <4 x float> %load
 }
@@ -459,33 +315,21 @@ define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1>
 declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x float> @strided_vpload_v8f32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8f32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8f32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret <8 x float> %load
 }
 
 define <8 x float> @strided_vpload_v8f32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8f32_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8f32_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8f32_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> poison, i1 true, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
   %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
@@ -495,17 +339,11 @@ define <8 x float> @strided_vpload_v8f32_allones_mask(ptr %ptr, i32 signext %str
 declare <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v2f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v2f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret <2 x double> %load
 }
@@ -513,33 +351,21 @@ define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1
 declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x double> @strided_vpload_v4f64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret <4 x double> %load
 }
 
 define <4 x double> @strided_vpload_v4f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v4f64_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v4f64_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v4f64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <4 x i1> poison, i1 true, i32 0
   %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
   %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
@@ -549,50 +375,32 @@ define <4 x double> @strided_vpload_v4f64_allones_mask(ptr %ptr, i32 signext %st
 declare <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x double> @strided_vpload_v8f64(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v8f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v8f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret <8 x double> %load
 }
 
 ; Widening
 define <3 x double> @strided_vpload_v3f64(ptr %ptr, i32 signext %stride, <3 x i1> %mask, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v3f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v3f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v3f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %v = call <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr %ptr, i32 %stride, <3 x i1> %mask, i32 %evl)
   ret <3 x double> %v
 }
 
 define <3 x double> @strided_vpload_v3f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpload_v3f64_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v3f64_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v3f64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %one = insertelement <3 x i1> poison, i1 true, i32 0
   %allones = shufflevector <3 x i1> %one, <3 x i1> poison, <3 x i32> zeroinitializer
   %v = call <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr %ptr, i32 %stride, <3 x i1> %allones, i32 %evl)
@@ -603,97 +411,53 @@ declare <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr, i32, <
 
 ; Splitting
 define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x i1> %m, i32 zeroext %evl) nounwind {
-; CHECK-RV32-LABEL: strided_vpload_v32f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    li a4, 16
-; CHECK-RV32-NEXT:    vmv1r.v v8, v0
-; CHECK-RV32-NEXT:    mv a3, a2
-; CHECK-RV32-NEXT:    bltu a2, a4, .LBB33_2
-; CHECK-RV32-NEXT:  # %bb.1:
-; CHECK-RV32-NEXT:    li a3, 16
-; CHECK-RV32-NEXT:  .LBB33_2:
-; CHECK-RV32-NEXT:    mul a4, a3, a1
-; CHECK-RV32-NEXT:    add a4, a0, a4
-; CHECK-RV32-NEXT:    addi a5, a2, -16
-; CHECK-RV32-NEXT:    sltu a2, a2, a5
-; CHECK-RV32-NEXT:    addi a2, a2, -1
-; CHECK-RV32-NEXT:    and a2, a2, a5
-; CHECK-RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-RV32-NEXT:    vslidedown.vi v0, v8, 2
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v16, (a4), a1, v0.t
-; CHECK-RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vmv1r.v v0, v8
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v32f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    li a4, 16
-; CHECK-RV64-NEXT:    vmv1r.v v8, v0
-; CHECK-RV64-NEXT:    mv a3, a2
-; CHECK-RV64-NEXT:    bltu a2, a4, .LBB33_2
-; CHECK-RV64-NEXT:  # %bb.1:
-; CHECK-RV64-NEXT:    li a3, 16
-; CHECK-RV64-NEXT:  .LBB33_2:
-; CHECK-RV64-NEXT:    mul a4, a3, a1
-; CHECK-RV64-NEXT:    add a4, a0, a4
-; CHECK-RV64-NEXT:    addi a5, a2, -16
-; CHECK-RV64-NEXT:    sltu a2, a2, a5
-; CHECK-RV64-NEXT:    addi a2, a2, -1
-; CHECK-RV64-NEXT:    and a2, a2, a5
-; CHECK-RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-RV64-NEXT:    vslidedown.vi v0, v8, 2
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v16, (a4), a1, v0.t
-; CHECK-RV64-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vmv1r.v v0, v8
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v32f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a4, 16
+; CHECK-NEXT:    vmv1r.v v8, v0
+; CHECK-NEXT:    mv a3, a2
+; CHECK-NEXT:    bltu a2, a4, .LBB33_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a3, 16
+; CHECK-NEXT:  .LBB33_2:
+; CHECK-NEXT:    mul a4, a3, a1
+; CHECK-NEXT:    add a4, a0, a4
+; CHECK-NEXT:    addi a5, a2, -16
+; CHECK-NEXT:    sltu a2, a2, a5
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a2, a2, a5
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v0, v8, 2
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT:    vlse64.v v16, (a4), a1, v0.t
+; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   %load = call <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr %ptr, i32 %stride, <32 x i1> %m, i32 %evl)
   ret <32 x double> %load
 }
 
 define <32 x double> @strided_vpload_v32f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) nounwind {
-; CHECK-RV32-LABEL: strided_vpload_v32f64_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    li a4, 16
-; CHECK-RV32-NEXT:    mv a3, a2
-; CHECK-RV32-NEXT:    bltu a2, a4, .LBB34_2
-; CHECK-RV32-NEXT:  # %bb.1:
-; CHECK-RV32-NEXT:    li a3, 16
-; CHECK-RV32-NEXT:  .LBB34_2:
-; CHECK-RV32-NEXT:    mul a4, a3, a1
-; CHECK-RV32-NEXT:    add a4, a0, a4
-; CHECK-RV32-NEXT:    addi a5, a2, -16
-; CHECK-RV32-NEXT:    sltu a2, a2, a5
-; CHECK-RV32-NEXT:    addi a2, a2, -1
-; CHECK-RV32-NEXT:    and a2, a2, a5
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v16, (a4), a1
-; CHECK-RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpload_v32f64_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    li a4, 16
-; CHECK-RV64-NEXT:    mv a3, a2
-; CHECK-RV64-NEXT:    bltu a2, a4, .LBB34_2
-; CHECK-RV64-NEXT:  # %bb.1:
-; CHECK-RV64-NEXT:    li a3, 16
-; CHECK-RV64-NEXT:  .LBB34_2:
-; CHECK-RV64-NEXT:    mul a4, a3, a1
-; CHECK-RV64-NEXT:    add a4, a0, a4
-; CHECK-RV64-NEXT:    addi a5, a2, -16
-; CHECK-RV64-NEXT:    sltu a2, a2, a5
-; CHECK-RV64-NEXT:    addi a2, a2, -1
-; CHECK-RV64-NEXT:    and a2, a2, a5
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v16, (a4), a1
-; CHECK-RV64-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpload_v32f64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a4, 16
+; CHECK-NEXT:    mv a3, a2
+; CHECK-NEXT:    bltu a2, a4, .LBB34_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a3, 16
+; CHECK-NEXT:  .LBB34_2:
+; CHECK-NEXT:    mul a4, a3, a1
+; CHECK-NEXT:    add a4, a0, a4
+; CHECK-NEXT:    addi a5, a2, -16
+; CHECK-NEXT:    sltu a2, a2, a5
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a2, a2, a5
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT:    vlse64.v v16, (a4), a1
+; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %one = insertelement <32 x i1> poison, i1 true, i32 0
   %allones = shufflevector <32 x i1> %one, <32 x i1> poison, <32 x i32> zeroinitializer
   %load = call <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr %ptr, i32 %stride, <32 x i1> %allones, i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
index 064b4b493fcdd..de4327ba6479f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -1,25 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+v,+experimental-zvfh \
 ; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+v,+experimental-zvfh \
 ; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s --check-prefixes=CHECK-RV64
+; RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
 
 declare void @llvm.experimental.vp.strided.store.v2i8.p0.i8(<2 x i8>, ptr, i8, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i8_i8(<2 x i8> %val, ptr %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2i8_i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2i8_i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2i8_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2i8.p0.i8(<2 x i8> %val, ptr %ptr, i8 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -27,17 +21,11 @@ define void @strided_vpstore_v2i8_i8(<2 x i8> %val, ptr %ptr, i8 signext %stride
 declare void @llvm.experimental.vp.strided.store.v2i8.p0.i16(<2 x i8>, ptr, i16, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i8_i16(<2 x i8> %val, ptr %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2i8_i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2i8_i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2i8_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2i8.p0.i16(<2 x i8> %val, ptr %ptr, i16 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -63,17 +51,11 @@ define void @strided_vpstore_v2i8_i64(<2 x i8> %val, ptr %ptr, i64 signext %stri
 declare void @llvm.experimental.vp.strided.store.v2i8.p0.i32(<2 x i8>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i8(<2 x i8> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2i8.p0.i32(<2 x i8> %val, ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -81,17 +63,11 @@ define void @strided_vpstore_v2i8(<2 x i8> %val, ptr %ptr, i32 signext %stride,
 declare void @llvm.experimental.vp.strided.store.v4i8.p0.i32(<4 x i8>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4i8(<4 x i8> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v4i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
-; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v4i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
-; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4i8.p0.i32(<4 x i8> %val, ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret void
 }
@@ -99,17 +75,11 @@ define void @strided_vpstore_v4i8(<4 x i8> %val, ptr %ptr, i32 signext %stride,
 declare void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v8i8:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v8i8:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8> %val, ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret void
 }
@@ -117,17 +87,11 @@ define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride,
 declare void @llvm.experimental.vp.strided.store.v2i16.p0.i32(<2 x i16>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2i16.p0.i32(<2 x i16> %val, ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -135,17 +99,11 @@ define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v4i16.p0.i32(<4 x i16>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4i16(<4 x i16> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v4i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v4i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4i16.p0.i32(<4 x i16> %val, ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret void
 }
@@ -153,17 +111,11 @@ define void @strided_vpstore_v4i16(<4 x i16> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v8i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v8i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16> %val, ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret void
 }
@@ -171,17 +123,11 @@ define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v2i32.p0.i32(<2 x i32>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2i32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2i32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2i32.p0.i32(<2 x i32> %val, ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -189,17 +135,11 @@ define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v4i32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v4i32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32> %val, ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret void
 }
@@ -207,17 +147,11 @@ define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v8i32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v8i32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32> %val, ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret void
 }
@@ -225,17 +159,11 @@ define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2i64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2i64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64> %val, ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -243,17 +171,11 @@ define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v4i64.p0.i32(<4 x i64>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v4i64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v4i64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4i64.p0.i32(<4 x i64> %val, ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret void
 }
@@ -261,17 +183,11 @@ define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v8i64.p0.i32(<8 x i64>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8i64(<8 x i64> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v8i64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v8i64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8i64.p0.i32(<8 x i64> %val, ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret void
 }
@@ -279,17 +195,11 @@ define void @strided_vpstore_v8i64(<8 x i64> %val, ptr %ptr, i32 signext %stride
 declare void @llvm.experimental.vp.strided.store.v2f16.p0.i32(<2 x half>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2f16(<2 x half> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2f16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2f16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
-; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2f16.p0.i32(<2 x half> %val, ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -297,17 +207,11 @@ define void @strided_vpstore_v2f16(<2 x half> %val, ptr %ptr, i32 signext %strid
 declare void @llvm.experimental.vp.strided.store.v4f16.p0.i32(<4 x half>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4f16(<4 x half> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v4f16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v4f16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
-; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4f16.p0.i32(<4 x half> %val, ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret void
 }
@@ -315,17 +219,11 @@ define void @strided_vpstore_v4f16(<4 x half> %val, ptr %ptr, i32 signext %strid
 declare void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v8f16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v8f16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half> %val, ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret void
 }
@@ -333,17 +231,11 @@ define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %strid
 declare void @llvm.experimental.vp.strided.store.v2f32.p0.i32(<2 x float>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2f32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2f32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2f32.p0.i32(<2 x float> %val, ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -351,17 +243,11 @@ define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stri
 declare void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v4f32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v4f32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float> %val, ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret void
 }
@@ -369,17 +255,11 @@ define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stri
 declare void @llvm.experimental.vp.strided.store.v8f32.p0.i32(<8 x float>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v8f32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v8f32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8f32.p0.i32(<8 x float> %val, ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret void
 }
@@ -387,17 +267,11 @@ define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stri
 declare void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double> %val, ptr %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
   ret void
 }
@@ -405,17 +279,11 @@ define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %str
 declare void @llvm.experimental.vp.strided.store.v4f64.p0.i32(<4 x double>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v4f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v4f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v4f64.p0.i32(<4 x double> %val, ptr %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
   ret void
 }
@@ -423,33 +291,21 @@ define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %str
 declare void @llvm.experimental.vp.strided.store.v8f64.p0.i32(<8 x double>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8f64(<8 x double> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v8f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v8f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v8f64.p0.i32(<8 x double> %val, ptr %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
   ret void
 }
 
 define void @strided_vpstore_v2i8_allones_mask(<2 x i8> %val, ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v2i8_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v2i8_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
-; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v2i8_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %a = insertelement <2 x i1> poison, i1 true, i32 0
   %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
   call void @llvm.experimental.vp.strided.store.v2i8.p0.i32(<2 x i8> %val, ptr %ptr, i32 %stride, <2 x i1> %b, i32 %evl)
@@ -458,33 +314,21 @@ define void @strided_vpstore_v2i8_allones_mask(<2 x i8> %val, ptr %ptr, i32 sign
 
 ; Widening
 define void @strided_vpstore_v3f32(<3 x float> %v, ptr %ptr, i32 signext %stride, <3 x i1> %mask, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v3f32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v3f32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v3f32.p0.i32(<3 x float> %v, ptr %ptr, i32 %stride, <3 x i1> %mask, i32 %evl)
   ret void
 }
 
 define void @strided_vpstore_v3f32_allones_mask(<3 x float> %v, ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_vpstore_v3f32_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_vpstore_v3f32_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_vpstore_v3f32_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1
+; CHECK-NEXT:    ret
   %one = insertelement <3 x i1> poison, i1 true, i32 0
   %allones = shufflevector <3 x i1> %one, <3 x i1> poison, <3 x i32> zeroinitializer
   call void @llvm.experimental.vp.strided.store.v3f32.p0.i32(<3 x float> %v, ptr %ptr, i32 %stride, <3 x i1> %allones, i32 %evl)
@@ -495,93 +339,51 @@ declare void @llvm.experimental.vp.strided.store.v3f32.p0.i32(<3 x float>, ptr ,
 
 ; Splitting
 define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %stride, <32 x i1> %mask, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_store_v32f64:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    li a4, 16
-; CHECK-RV32-NEXT:    mv a3, a2
-; CHECK-RV32-NEXT:    bltu a2, a4, .LBB27_2
-; CHECK-RV32-NEXT:  # %bb.1:
-; CHECK-RV32-NEXT:    li a3, 16
-; CHECK-RV32-NEXT:  .LBB27_2:
-; CHECK-RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    mul a3, a3, a1
-; CHECK-RV32-NEXT:    add a0, a0, a3
-; CHECK-RV32-NEXT:    addi a3, a2, -16
-; CHECK-RV32-NEXT:    sltu a2, a2, a3
-; CHECK-RV32-NEXT:    addi a2, a2, -1
-; CHECK-RV32-NEXT:    and a2, a2, a3
-; CHECK-RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-RV32-NEXT:    vslidedown.vi v0, v0, 2
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v16, (a0), a1, v0.t
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_store_v32f64:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    li a4, 16
-; CHECK-RV64-NEXT:    mv a3, a2
-; CHECK-RV64-NEXT:    bltu a2, a4, .LBB27_2
-; CHECK-RV64-NEXT:  # %bb.1:
-; CHECK-RV64-NEXT:    li a3, 16
-; CHECK-RV64-NEXT:  .LBB27_2:
-; CHECK-RV64-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    mul a3, a3, a1
-; CHECK-RV64-NEXT:    add a0, a0, a3
-; CHECK-RV64-NEXT:    addi a3, a2, -16
-; CHECK-RV64-NEXT:    sltu a2, a2, a3
-; CHECK-RV64-NEXT:    addi a2, a2, -1
-; CHECK-RV64-NEXT:    and a2, a2, a3
-; CHECK-RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-RV64-NEXT:    vslidedown.vi v0, v0, 2
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v16, (a0), a1, v0.t
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_store_v32f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a4, 16
+; CHECK-NEXT:    mv a3, a2
+; CHECK-NEXT:    bltu a2, a4, .LBB27_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a3, 16
+; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    mul a3, a3, a1
+; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:    addi a3, a2, -16
+; CHECK-NEXT:    sltu a2, a2, a3
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a2, a2, a3
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT:    vsse64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
   call void @llvm.experimental.vp.strided.store.v32f64.p0.i32(<32 x double> %v, ptr %ptr, i32 %stride, <32 x i1> %mask, i32 %evl)
   ret void
 }
 
 define void @strided_store_v32f64_allones_mask(<32 x double> %v, ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
-; CHECK-RV32-LABEL: strided_store_v32f64_allones_mask:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    li a4, 16
-; CHECK-RV32-NEXT:    mv a3, a2
-; CHECK-RV32-NEXT:    bltu a2, a4, .LBB28_2
-; CHECK-RV32-NEXT:  # %bb.1:
-; CHECK-RV32-NEXT:    li a3, 16
-; CHECK-RV32-NEXT:  .LBB28_2:
-; CHECK-RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-RV32-NEXT:    mul a3, a3, a1
-; CHECK-RV32-NEXT:    add a0, a0, a3
-; CHECK-RV32-NEXT:    addi a3, a2, -16
-; CHECK-RV32-NEXT:    sltu a2, a2, a3
-; CHECK-RV32-NEXT:    addi a2, a2, -1
-; CHECK-RV32-NEXT:    and a2, a2, a3
-; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vsse64.v v16, (a0), a1
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: strided_store_v32f64_allones_mask:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    li a4, 16
-; CHECK-RV64-NEXT:    mv a3, a2
-; CHECK-RV64-NEXT:    bltu a2, a4, .LBB28_2
-; CHECK-RV64-NEXT:  # %bb.1:
-; CHECK-RV64-NEXT:    li a3, 16
-; CHECK-RV64-NEXT:  .LBB28_2:
-; CHECK-RV64-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-RV64-NEXT:    mul a3, a3, a1
-; CHECK-RV64-NEXT:    add a0, a0, a3
-; CHECK-RV64-NEXT:    addi a3, a2, -16
-; CHECK-RV64-NEXT:    sltu a2, a2, a3
-; CHECK-RV64-NEXT:    addi a2, a2, -1
-; CHECK-RV64-NEXT:    and a2, a2, a3
-; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vsse64.v v16, (a0), a1
-; CHECK-RV64-NEXT:    ret
+; CHECK-LABEL: strided_store_v32f64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a4, 16
+; CHECK-NEXT:    mv a3, a2
+; CHECK-NEXT:    bltu a2, a4, .LBB28_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a3, 16
+; CHECK-NEXT:  .LBB28_2:
+; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1
+; CHECK-NEXT:    mul a3, a3, a1
+; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:    addi a3, a2, -16
+; CHECK-NEXT:    sltu a2, a2, a3
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a2, a2, a3
+; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT:    vsse64.v v16, (a0), a1
+; CHECK-NEXT:    ret
   %one = insertelement <32 x i1> poison, i1 true, i32 0
   %allones = shufflevector <32 x i1> %one, <32 x i1> poison, <32 x i32> zeroinitializer
   call void @llvm.experimental.vp.strided.store.v32f64.p0.i32(<32 x double> %v, ptr %ptr, i32 %stride, <32 x i1> %allones, i32 %evl)

More information about the llvm-commits mailing list