[llvm] 71b7c03 - [RISCV][VP] Custom lower VP_STRIDED_LOAD and VP_STRIDED_STORE

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 1 09:24:10 PDT 2022


Author: Lorenzo Albano
Date: 2022-08-01T09:23:45-07:00
New Revision: 71b7c03fd66279730ec713aa89d469003aa69c96

URL: https://github.com/llvm/llvm-project/commit/71b7c03fd66279730ec713aa89d469003aa69c96
DIFF: https://github.com/llvm/llvm-project/commit/71b7c03fd66279730ec713aa89d469003aa69c96.diff

LOG: [RISCV][VP] Custom lower VP_STRIDED_LOAD and VP_STRIDED_STORE

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D121113
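
For context, the new tests below exercise IR of the following shape; with this patch the EXPERIMENTAL_VP_STRIDED_LOAD/STORE nodes are custom lowered to the riscv_vlse(_mask)/riscv_vsse(_mask) intrinsics, so a masked strided VP load selects to a single vlse instruction. A minimal example mirroring fixed-vectors-strided-vpload.ll:

declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8*, i8, <2 x i1>, i32)

define <2 x i8> @strided_vpload_v2i8_i8(i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
  ; Expected codegen on both RV32 and RV64:
  ;   vsetvli zero, a2, e8, mf8, ta, mu
  ;   vlse8.v v8, (a0), a1, v0.t
  %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %load
}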

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
    llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
    llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 59344a1396f15..518f2b16b67df 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -596,8 +596,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          VT, Custom);
 
       setOperationAction(
-          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-          Custom);
+          {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+           ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
+          VT, Custom);
 
       setOperationAction(
           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
@@ -686,8 +687,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          VT, Custom);
 
       setOperationAction(
-          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-          Custom);
+          {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+           ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
+          VT, Custom);
 
       setOperationAction(ISD::SELECT, VT, Custom);
       setOperationAction(ISD::SELECT_CC, VT, Expand);
@@ -812,9 +814,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(
             {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
 
-        setOperationAction(
-            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-            Custom);
+        setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
+                            ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+                            ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
+                            ISD::VP_SCATTER},
+                           VT, Custom);
 
         setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
                             ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
@@ -885,9 +889,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                             ISD::MGATHER, ISD::MSCATTER},
                            VT, Custom);
 
-        setOperationAction(
-            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
-            Custom);
+        setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
+                            ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
+                            ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
+                            ISD::VP_SCATTER},
+                           VT, Custom);
 
         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
@@ -3736,6 +3742,10 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
       return lowerVPSetCCMaskOp(Op, DAG);
     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
+  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
+    return lowerVPStridedLoad(Op, DAG);
+  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
+    return lowerVPStridedStore(Op, DAG);
   }
 }
 
@@ -6557,6 +6567,89 @@ SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
   return convertFromScalableVector(VT, Val, DAG, Subtarget);
 }
 
+SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+  MVT VT = Op.getSimpleValueType();
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector())
+    ContainerVT = getContainerForFixedLengthVector(VT);
+
+  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+
+  auto *VPNode = cast<VPStridedLoadSDNode>(Op);
+  // Check if the mask is known to be all ones
+  SDValue Mask = VPNode->getMask();
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vlse
+                                                   : Intrinsic::riscv_vlse_mask,
+                                        DL, XLenVT);
+  SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID,
+                              DAG.getUNDEF(ContainerVT), VPNode->getBasePtr(),
+                              VPNode->getStride()};
+  if (!IsUnmasked) {
+    if (VT.isFixedLengthVector()) {
+      MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+    Ops.push_back(Mask);
+  }
+  Ops.push_back(VPNode->getVectorLength());
+  if (!IsUnmasked) {
+    SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+    Ops.push_back(Policy);
+  }
+
+  SDValue Result =
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
+                              VPNode->getMemoryVT(), VPNode->getMemOperand());
+  SDValue Chain = Result.getValue(1);
+
+  if (VT.isFixedLengthVector())
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+
+  return DAG.getMergeValues({Result, Chain}, DL);
+}
+
+SDValue RISCVTargetLowering::lowerVPStridedStore(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  auto *VPNode = cast<VPStridedStoreSDNode>(Op);
+  SDValue StoreVal = VPNode->getValue();
+  MVT VT = StoreVal.getSimpleValueType();
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    StoreVal = convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
+  }
+
+  // Check if the mask is known to be all ones
+  SDValue Mask = VPNode->getMask();
+  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
+
+  SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vsse
+                                                   : Intrinsic::riscv_vsse_mask,
+                                        DL, XLenVT);
+  SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID, StoreVal,
+                              VPNode->getBasePtr(), VPNode->getStride()};
+  if (!IsUnmasked) {
+    if (VT.isFixedLengthVector()) {
+      MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
+      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
+    }
+    Ops.push_back(Mask);
+  }
+  Ops.push_back(VPNode->getVectorLength());
+
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, VPNode->getVTList(),
+                                 Ops, VPNode->getMemoryVT(),
+                                 VPNode->getMemOperand());
+}
+
 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
 // matched to a RVV indexed load. The RVV indexed load instructions only
 // support the "unsigned unscaled" addressing mode; indices are implicitly

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index a1ab23acbb220..2f5d4d5954e69 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -692,6 +692,8 @@ class RISCVTargetLowering : public TargetLowering {
   SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
                              unsigned RISCVISDOpc) const;
+  SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVPStridedStore(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned ExtendOpc) const;
   SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
new file mode 100644
index 0000000000000..89fdab53e021d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -0,0 +1,565 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-RV64
+
+declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8*, i8, <2 x i1>, i32)
+
+define <2 x i8> @strided_vpload_v2i8_i8(i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2i8_i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2i8_i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %load
+}
+
+declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i16(i8*, i16, <2 x i1>, i32)
+
+define <2 x i8> @strided_vpload_v2i8_i16(i8* %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2i8_i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2i8_i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i16(i8* %ptr, i16 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %load
+}
+
+declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i64(i8*, i64, <2 x i1>, i32)
+
+define <2 x i8> @strided_vpload_v2i8_i64(i8* %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2i8_i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2i8_i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i64(i8* %ptr, i64 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %load
+}
+
+declare <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i32(i8*, i32, <2 x i1>, i32)
+
+define <2 x i8> @strided_vpload_v2i8(i8* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i32(i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %load
+}
+
+declare <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8*, i32, <4 x i1>, i32)
+
+define <4 x i8> @strided_vpload_v4i8(i8* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %load
+}
+
+define <4 x i8> @strided_vpload_v4i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4i8_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4i8_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <4 x i1> poison, i1 true, i32 0
+  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
+  %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8* %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
+  ret <4 x i8> %load
+}
+
+declare <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0i8.i32(i8*, i32, <8 x i1>, i32)
+
+define <8 x i8> @strided_vpload_v8i8(i8* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0i8.i32(i8* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret <8 x i8> %load
+}
+
+declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0i16.i32(i16*, i32, <2 x i1>, i32)
+
+define <2 x i16> @strided_vpload_v2i16(i16* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0i16.i32(i16* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x i16> %load
+}
+
+declare <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0i16.i32(i16*, i32, <4 x i1>, i32)
+
+define <4 x i16> @strided_vpload_v4i16(i16* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0i16.i32(i16* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret <4 x i16> %load
+}
+
+declare <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16*, i32, <8 x i1>, i32)
+
+define <8 x i16> @strided_vpload_v8i16(i16* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %load
+}
+
+define <8 x i16> @strided_vpload_v8i16_allones_mask(i16* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8i16_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8i16_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <8 x i1> poison, i1 true, i32 0
+  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
+  %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16* %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
+  ret <8 x i16> %load
+}
+
+declare <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0i32.i32(i32*, i32, <2 x i1>, i32)
+
+define <2 x i32> @strided_vpload_v2i32(i32* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0i32.i32(i32* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x i32> %load
+}
+
+declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0i32.i32(i32*, i32, <4 x i1>, i32)
+
+define <4 x i32> @strided_vpload_v4i32(i32* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0i32.i32(i32* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %load
+}
+
+declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32*, i32, <8 x i1>, i32)
+
+define <8 x i32> @strided_vpload_v8i32(i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %load
+}
+
+define <8 x i32> @strided_vpload_v8i32_allones_mask(i32* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8i32_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8i32_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <8 x i1> poison, i1 true, i32 0
+  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
+  %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
+  ret <8 x i32> %load
+}
+
+declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0i64.i32(i64*, i32, <2 x i1>, i32)
+
+define <2 x i64> @strided_vpload_v2i64(i64* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0i64.i32(i64* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %load
+}
+
+declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64*, i32, <4 x i1>, i32)
+
+define <4 x i64> @strided_vpload_v4i64(i64* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %load
+}
+
+define <4 x i64> @strided_vpload_v4i64_allones_mask(i64* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4i64_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4i64_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <4 x i1> poison, i1 true, i32 0
+  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
+  %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64* %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
+  ret <4 x i64> %load
+}
+
+declare <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0i64.i32(i64*, i32, <8 x i1>, i32)
+
+define <8 x i64> @strided_vpload_v8i64(i64* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0i64.i32(i64* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %load
+}
+
+declare <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half*, i32, <2 x i1>, i32)
+
+define <2 x half> @strided_vpload_v2f16(half* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x half> %load
+}
+
+define <2 x half> @strided_vpload_v2f16_allones_mask(half* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2f16_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2f16_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <2 x i1> poison, i1 true, i32 0
+  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
+  %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half* %ptr, i32 %stride, <2 x i1> %b, i32 %evl)
+  ret <2 x half> %load
+}
+
+declare <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0f16.i32(half*, i32, <4 x i1>, i32)
+
+define <4 x half> @strided_vpload_v4f16(half* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0f16.i32(half* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %load
+}
+
+declare <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0f16.i32(half*, i32, <8 x i1>, i32)
+
+define <8 x half> @strided_vpload_v8f16(half* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0f16.i32(half* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %load
+}
+
+declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0f32.i32(float*, i32, <2 x i1>, i32)
+
+define <2 x float> @strided_vpload_v2f32(float* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0f32.i32(float* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x float> %load
+}
+
+declare <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0f32.i32(float*, i32, <4 x i1>, i32)
+
+define <4 x float> @strided_vpload_v4f32(float* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0f32.i32(float* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %load
+}
+
+declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float*, i32, <8 x i1>, i32)
+
+define <8 x float> @strided_vpload_v8f32(float* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %load
+}
+
+define <8 x float> @strided_vpload_v8f32_allones_mask(float* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8f32_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8f32_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <8 x i1> poison, i1 true, i32 0
+  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
+  %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float* %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
+  ret <8 x float> %load
+}
+
+declare <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0f64.i32(double*, i32, <2 x i1>, i32)
+
+define <2 x double> @strided_vpload_v2f64(double* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v2f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v2f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0f64.i32(double* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret <2 x double> %load
+}
+
+declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double*, i32, <4 x i1>, i32)
+
+define <4 x double> @strided_vpload_v4f64(double* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %load
+}
+
+define <4 x double> @strided_vpload_v4f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v4f64_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v4f64_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <4 x i1> poison, i1 true, i32 0
+  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
+  %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double* %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
+  ret <4 x double> %load
+}
+
+declare <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0f64.i32(double*, i32, <8 x i1>, i32)
+
+define <8 x double> @strided_vpload_v8f64(double* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_v8f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_v8f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0f64.i32(double* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %load
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
new file mode 100644
index 0000000000000..1cee800ef0983
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -0,0 +1,457 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:   -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-RV64
+
+declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i8(<2 x i8>, i8*, i8, <2 x i1>, i32)
+
+define void @strided_vpstore_v2i8_i8(<2 x i8> %val, i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i8_i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i8_i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i8(<2 x i8> %val, i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i16(<2 x i8>, i8*, i16, <2 x i1>, i32)
+
+define void @strided_vpstore_v2i8_i16(<2 x i8> %val, i8* %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i8_i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i8_i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i16(<2 x i8> %val, i8* %ptr, i16 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i64(<2 x i8>, i8*, i64, <2 x i1>, i32)
+
+define void @strided_vpstore_v2i8_i64(<2 x i8> %val, i8* %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i8_i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i8_i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i64(<2 x i8> %val, i8* %ptr, i64 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2i8.p0i8.i32(<2 x i8>, i8*, i32, <2 x i1>, i32)
+
+define void @strided_vpstore_v2i8(<2 x i8> %val, i8* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i32(<2 x i8> %val, i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v4i8.p0i8.i32(<4 x i8>, i8*, i32, <4 x i1>, i32)
+
+define void @strided_vpstore_v4i8(<4 x i8> %val, i8* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v4i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v4i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4i8.p0i8.i32(<4 x i8> %val, i8* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v8i8.p0i8.i32(<8 x i8>, i8*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8i8(<8 x i8> %val, i8* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v8i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i8.p0i8.i32(<8 x i8> %val, i8* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2i16.p0i16.i32(<2 x i16>, i16*, i32, <2 x i1>, i32)
+
+define void @strided_vpstore_v2i16(<2 x i16> %val, i16* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i16.p0i16.i32(<2 x i16> %val, i16* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v4i16.p0i16.i32(<4 x i16>, i16*, i32, <4 x i1>, i32)
+
+define void @strided_vpstore_v4i16(<4 x i16> %val, i16* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v4i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v4i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4i16.p0i16.i32(<4 x i16> %val, i16* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v8i16.p0i16.i32(<8 x i16>, i16*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8i16(<8 x i16> %val, i16* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v8i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i16.p0i16.i32(<8 x i16> %val, i16* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2i32.p0i32.i32(<2 x i32>, i32*, i32, <2 x i1>, i32)
+
+define void @strided_vpstore_v2i32(<2 x i32> %val, i32* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i32.p0i32.i32(<2 x i32> %val, i32* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v4i32.p0i32.i32(<4 x i32>, i32*, i32, <4 x i1>, i32)
+
+define void @strided_vpstore_v4i32(<4 x i32> %val, i32* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v4i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v4i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4i32.p0i32.i32(<4 x i32> %val, i32* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32>, i32*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8i32(<8 x i32> %val, i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v8i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32> %val, i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2i64.p0i64.i32(<2 x i64>, i64*, i32, <2 x i1>, i32)
+
+define void @strided_vpstore_v2i64(<2 x i64> %val, i64* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i64.p0i64.i32(<2 x i64> %val, i64* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v4i64.p0i64.i32(<4 x i64>, i64*, i32, <4 x i1>, i32)
+
+define void @strided_vpstore_v4i64(<4 x i64> %val, i64* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4i64.p0i64.i32(<4 x i64> %val, i64* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v8i64.p0i64.i32(<8 x i64>, i64*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8i64(<8 x i64> %val, i64* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i64.p0i64.i32(<8 x i64> %val, i64* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2f16.p0f16.i32(<2 x half>, half*, i32, <2 x i1>, i32)
+
+define void @strided_vpstore_v2f16(<2 x half> %val, half* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2f16.p0f16.i32(<2 x half> %val, half* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v4f16.p0f16.i32(<4 x half>, half*, i32, <4 x i1>, i32)
+
+define void @strided_vpstore_v4f16(<4 x half> %val, half* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v4f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v4f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4f16.p0f16.i32(<4 x half> %val, half* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v8f16.p0f16.i32(<8 x half>, half*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8f16(<8 x half> %val, half* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v8f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8f16.p0f16.i32(<8 x half> %val, half* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2f32.p0f32.i32(<2 x float>, float*, i32, <2 x i1>, i32)
+
+define void @strided_vpstore_v2f32(<2 x float> %val, float* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2f32.p0f32.i32(<2 x float> %val, float* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v4f32.p0f32.i32(<4 x float>, float*, i32, <4 x i1>, i32)
+
+define void @strided_vpstore_v4f32(<4 x float> %val, float* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v4f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v4f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4f32.p0f32.i32(<4 x float> %val, float* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v8f32.p0f32.i32(<8 x float>, float*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8f32(<8 x float> %val, float* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v8f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8f32.p0f32.i32(<8 x float> %val, float* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v2f64.p0f64.i32(<2 x double>, double*, i32, <2 x i1>, i32)
+
+define void @strided_vpstore_v2f64(<2 x double> %val, double* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2f64.p0f64.i32(<2 x double> %val, double* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v4f64.p0f64.i32(<4 x double>, double*, i32, <4 x i1>, i32)
+
+define void @strided_vpstore_v4f64(<4 x double> %val, double* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v4f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v4f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4f64.p0f64.i32(<4 x double> %val, double* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.v8f64.p0f64.i32(<8 x double>, double*, i32, <8 x i1>, i32)
+
+define void @strided_vpstore_v8f64(<8 x double> %val, double* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v8f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v8f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8f64.p0f64.i32(<8 x double> %val, double* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @strided_vpstore_v2i8_allones_mask(<2 x i8> %val, i8* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_v2i8_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_v2i8_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <2 x i1> poison, i1 true, i32 0
+  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
+  call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i32(<2 x i8> %val, i8* %ptr, i32 %stride, <2 x i1> %b, i32 %evl)
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
new file mode 100644
index 0000000000000..ead60611bc070
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -0,0 +1,725 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i8(i8*, i8, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(i8* %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i8(i8* %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i16(i8*, i16, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i16(i8* %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i16(i8* %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8*, i64, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64(i8* %ptr, i64 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8* %ptr, i64 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64_allones_mask(i8* %ptr, i64 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8* %ptr, i64 %stride, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+define <vscale x 1 x i8> @strided_vpload_nxv1i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i8_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i8_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
+declare <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0i8.i32(i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @strided_vpload_nxv2i8(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %load
+}
+
+declare <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0i8.i32(i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @strided_vpload_nxv4i8(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i8> %load
+}
+
+declare <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @strided_vpload_nxv8i8(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
+
+define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i8_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i8_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
+
+declare <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0i16.i32(i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @strided_vpload_nxv1i16(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i16> %load
+}
+
+declare <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @strided_vpload_nxv2i16(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %load
+}
+
+define <vscale x 2 x i16> @strided_vpload_nxv2i16_allones_mask(i16* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i16_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i16_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
+  ret <vscale x 2 x i16> %load
+}
+
+declare <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0i16.i32(i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @strided_vpload_nxv4i16(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i16> %load
+}
+
+declare <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0i16.i32(i16*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @strided_vpload_nxv8i16(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %load
+}
+
+declare <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0i32.i32(i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @strided_vpload_nxv1i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i32> %load
+}
+
+declare <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0i32.i32(i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @strided_vpload_nxv2i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %load
+}
+
+declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @strided_vpload_nxv4i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 4 x i32> @strided_vpload_nxv4i32_allones_mask(i32* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i32_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i32_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
+  ret <vscale x 4 x i32> %load
+}
+
+declare <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0i32.i32(i32*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @strided_vpload_nxv8i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %load
+}
+
+declare <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @strided_vpload_nxv1i64(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %load
+}
+
+define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(i64* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1i64_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1i64_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i64> %load
+}
+
+declare <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0i64.i32(i64*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @strided_vpload_nxv2i64(i64* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %load
+}
+
+declare <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0i64.i32(i64*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @strided_vpload_nxv4i64(i64* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i64> %load
+}
+
+declare <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0i64.i32(i64*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @strided_vpload_nxv8i64(i64* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %load
+}
+
+declare <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0f16.i32(half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @strided_vpload_nxv1f16(half* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x half> %load
+}
+
+declare <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @strided_vpload_nxv2f16(half* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %load
+}
+
+define <vscale x 2 x half> @strided_vpload_nxv2f16_allones_mask(half* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2f16_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2f16_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
+  ret <vscale x 2 x half> %load
+}
+
+declare <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0f16.i32(half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @strided_vpload_nxv4f16(half* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x half> %load
+}
+
+declare <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0f16.i32(half*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @strided_vpload_nxv8f16(half* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %load
+}
+
+declare <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0f32.i32(float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @strided_vpload_nxv1f32(float* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x float> %load
+}
+
+declare <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0f32.i32(float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @strided_vpload_nxv2f32(float* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %load
+}
+
+declare <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0f32.i32(float*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @strided_vpload_nxv4f32(float* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x float> %load
+}
+
+declare <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x float> @strided_vpload_nxv8f32(float* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %load
+}
+
+define <vscale x 8 x float> @strided_vpload_nxv8f32_allones_mask(float* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8f32_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8f32_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse32.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
+  ret <vscale x 8 x float> %load
+}
+
+declare <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0f64.i32(double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @strided_vpload_nxv1f64(double* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv1f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv1f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x double> %load
+}
+
+declare <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0f64.i32(double*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @strided_vpload_nxv2f64(double* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv2f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv2f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %load
+}
+
+declare <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x double> @strided_vpload_nxv4f64(double* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x double> %load
+}
+
+define <vscale x 4 x double> @strided_vpload_nxv4f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv4f64_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv4f64_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
+  ret <vscale x 4 x double> %load
+}
+
+declare <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0f64.i32(double*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x double> @strided_vpload_nxv8f64(double* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpload_nxv8f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpload_nxv8f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  %load = call <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %load
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
new file mode 100644
index 0000000000000..bc472871a626c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -0,0 +1,581 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+v,+experimental-zvfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64
+
+declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i8(<vscale x 1 x i8>, i8*, i8, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i8_i8(<vscale x 1 x i8> %val, i8* %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i8(<vscale x 1 x i8> %val, i8* %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i16(<vscale x 1 x i8>, i8*, i16, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i8_i16(<vscale x 1 x i8> %val, i8* %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i16(<vscale x 1 x i8> %val, i8* %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i64(<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i8_i64(<vscale x 1 x i8> %val, i8* %ptr, i64 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i64(<vscale x 1 x i8> %val, i8* %ptr, i64 %stride, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i32(<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i8(<vscale x 1 x i8> %val, i8* %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i32(<vscale x 1 x i8> %val, i8* %ptr, i32 %strided, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv2i8.p0i8.i32(<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @strided_vpstore_nxv2i8(<vscale x 2 x i8> %val, i8* %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv2i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv2i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv2i8.p0i8.i32(<vscale x 2 x i8> %val, i8* %ptr, i32 %strided, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv4i8.p0i8.i32(<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @strided_vpstore_nxv4i8(<vscale x 4 x i8> %val, i8* %ptr, i32 signext %strided, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv4i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv4i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i8.p0i8.i32(<vscale x 4 x i8> %val, i8* %ptr, i32 %strided, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv8i8.p0i8.i32(<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @strided_vpstore_nxv8i8(<vscale x 8 x i8> %val, i8* %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv8i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv8i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8i8.p0i8.i32(<vscale x 8 x i8> %val, i8* %ptr, i32 %strided, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i16.p0i16.i32(<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i16(<vscale x 1 x i16> %val, i16* %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i16.p0i16.i32(<vscale x 1 x i16> %val, i16* %ptr, i32 %strided, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv2i16.p0i16.i32(<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @strided_vpstore_nxv2i16(<vscale x 2 x i16> %val, i16* %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv2i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv2i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv2i16.p0i16.i32(<vscale x 2 x i16> %val, i16* %ptr, i32 %strided, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv4i16.p0i16.i32(<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @strided_vpstore_nxv4i16(<vscale x 4 x i16> %val, i16* %ptr, i32 signext %strided, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv4i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv4i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i16.p0i16.i32(<vscale x 4 x i16> %val, i16* %ptr, i32 %strided, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv8i16.p0i16.i32(<vscale x 8 x i16>, i16*, i32, <vscale x 8 x i1>, i32)
+
+define void @strided_vpstore_nxv8i16(<vscale x 8 x i16> %val, i16* %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv8i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv8i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8i16.p0i16.i32(<vscale x 8 x i16> %val, i16* %ptr, i32 %strided, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i32.p0i32.i32(<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i32(<vscale x 1 x i32> %val, i32* %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i32.p0i32.i32(<vscale x 1 x i32> %val, i32* %ptr, i32 %strided, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv2i32.p0i32.i32(<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @strided_vpstore_nxv2i32(<vscale x 2 x i32> %val, i32* %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv2i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv2i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv2i32.p0i32.i32(<vscale x 2 x i32> %val, i32* %ptr, i32 %strided, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv4i32.p0i32.i32(<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define void @strided_vpstore_nxv4i32(<vscale x 4 x i32> %val, i32* %ptr, i32 signext %strided, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv4i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv4i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i32.p0i32.i32(<vscale x 4 x i32> %val, i32* %ptr, i32 %strided, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv8i32.p0i32.i32(<vscale x 8 x i32>, i32*, i32, <vscale x 8 x i1>, i32)
+
+define void @strided_vpstore_nxv8i32(<vscale x 8 x i32> %val, i32* %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv8i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv8i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8i32.p0i32.i32(<vscale x 8 x i32> %val, i32* %ptr, i32 %strided, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1i64.p0i64.i32(<vscale x 1 x i64>, i64*, i32, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1i64(<vscale x 1 x i64> %val, i64* %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i64.p0i64.i32(<vscale x 1 x i64> %val, i64* %ptr, i32 %strided, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv2i64.p0i64.i32(<vscale x 2 x i64>, i64*, i32, <vscale x 2 x i1>, i32)
+
+define void @strided_vpstore_nxv2i64(<vscale x 2 x i64> %val, i64* %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv2i64.p0i64.i32(<vscale x 2 x i64> %val, i64* %ptr, i32 %strided, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv4i64.p0i64.i32(<vscale x 4 x i64>, i64*, i32, <vscale x 4 x i1>, i32)
+
+define void @strided_vpstore_nxv4i64(<vscale x 4 x i64> %val, i64* %ptr, i32 signext %strided, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i64.p0i64.i32(<vscale x 4 x i64> %val, i64* %ptr, i32 %strided, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv8i64.p0i64.i32(<vscale x 8 x i64>, i64*, i32, <vscale x 8 x i1>, i32)
+
+define void @strided_vpstore_nxv8i64(<vscale x 8 x i64> %val, i64* %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8i64.p0i64.i32(<vscale x 8 x i64> %val, i64* %ptr, i32 %strided, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1f16.p0f16.i32(<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1f16(<vscale x 1 x half> %val, half* %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1f16.p0f16.i32(<vscale x 1 x half> %val, half* %ptr, i32 %strided, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv2f16.p0f16.i32(<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @strided_vpstore_nxv2f16(<vscale x 2 x half> %val, half* %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv2f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv2f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv2f16.p0f16.i32(<vscale x 2 x half> %val, half* %ptr, i32 %strided, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv4f16.p0f16.i32(<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @strided_vpstore_nxv4f16(<vscale x 4 x half> %val, half* %ptr, i32 signext %strided, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv4f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv4f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4f16.p0f16.i32(<vscale x 4 x half> %val, half* %ptr, i32 %strided, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv8f16.p0f16.i32(<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define void @strided_vpstore_nxv8f16(<vscale x 8 x half> %val, half* %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv8f16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv8f16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8f16.p0f16.i32(<vscale x 8 x half> %val, half* %ptr, i32 %strided, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1f32.p0f32.i32(<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1f32(<vscale x 1 x float> %val, float* %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1f32.p0f32.i32(<vscale x 1 x float> %val, float* %ptr, i32 %strided, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv2f32.p0f32.i32(<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @strided_vpstore_nxv2f32(<vscale x 2 x float> %val, float* %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv2f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv2f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv2f32.p0f32.i32(<vscale x 2 x float> %val, float* %ptr, i32 %strided, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv4f32.p0f32.i32(<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)
+
+define void @strided_vpstore_nxv4f32(<vscale x 4 x float> %val, float* %ptr, i32 signext %strided, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv4f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv4f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4f32.p0f32.i32(<vscale x 4 x float> %val, float* %ptr, i32 %strided, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv8f32.p0f32.i32(<vscale x 8 x float>, float*, i32, <vscale x 8 x i1>, i32)
+
+define void @strided_vpstore_nxv8f32(<vscale x 8 x float> %val, float* %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv8f32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv8f32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8f32.p0f32.i32(<vscale x 8 x float> %val, float* %ptr, i32 %strided, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv1f64.p0f64.i32(<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @strided_vpstore_nxv1f64(<vscale x 1 x double> %val, double* %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1f64.p0f64.i32(<vscale x 1 x double> %val, double* %ptr, i32 %strided, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv2f64.p0f64.i32(<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define void @strided_vpstore_nxv2f64(<vscale x 2 x double> %val, double* %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv2f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv2f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv2f64.p0f64.i32(<vscale x 2 x double> %val, double* %ptr, i32 %strided, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv4f64.p0f64.i32(<vscale x 4 x double>, double*, i32, <vscale x 4 x i1>, i32)
+
+define void @strided_vpstore_nxv4f64(<vscale x 4 x double> %val, double* %ptr, i32 signext %strided, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv4f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv4f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4f64.p0f64.i32(<vscale x 4 x double> %val, double* %ptr, i32 %strided, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.experimental.vp.strided.store.nxv8f64.p0f64.i32(<vscale x 8 x double>, double*, i32, <vscale x 8 x i1>, i32)
+
+define void @strided_vpstore_nxv8f64(<vscale x 8 x double> %val, double* %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv8f64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv8f64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT:    vsse64.v v8, (a0), a1, v0.t
+; CHECK-RV64-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8f64.p0f64.i32(<vscale x 8 x double> %val, double* %ptr, i32 %strided, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @strided_vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, i8* %ptr, i32 signext %strided, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_allones_mask:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_allones_mask:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-RV64-NEXT:    ret
+  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i32(<vscale x 1 x i8> %val, i8* %ptr, i32 %strided, <vscale x 1 x i1> %b, i32 %evl)
+  ret void
+}
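
For reference, here is a minimal caller sketch (not part of this commit; the function name and the constant stride are illustrative) showing how one of the intrinsics exercised above might be emitted with a constant byte stride. Since the stride operand of @llvm.experimental.vp.strided.store is expressed in bytes, a stride of 8 with 4-byte floats writes every other float slot; the lowering added in this patch should select this to a vsse32.v with the stride materialized in a register, much like the register-stride tests above.

declare void @llvm.experimental.vp.strided.store.nxv2f32.p0f32.i32(<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)

define void @store_every_other_f32(<vscale x 2 x float> %val, float* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
  ; Stride is in bytes: 8 skips every other 4-byte float element.
  call void @llvm.experimental.vp.strided.store.nxv2f32.p0f32.i32(<vscale x 2 x float> %val, float* %ptr, i32 8, <vscale x 2 x i1> %m, i32 %evl)
  ret void
}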