[llvm] f4dee8c - [RISCV][VP] Custom lower VP_SCATTER and VP_GATHER

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 7 02:52:59 PDT 2021


Author: Fraser Cormack
Date: 2021-09-07T10:43:07+01:00
New Revision: f4dee8cb82d199f843b0ca7e9846470f3912f593

URL: https://github.com/llvm/llvm-project/commit/f4dee8cb82d199f843b0ca7e9846470f3912f593
DIFF: https://github.com/llvm/llvm-project/commit/f4dee8cb82d199f843b0ca7e9846470f3912f593.diff

LOG: [RISCV][VP] Custom lower VP_SCATTER and VP_GATHER

This patch adds support for the `VP_SCATTER` and `VP_GATHER` nodes by
lowering them to RVV's `vsoxei`/`vluxei` indexed store/load instructions,
respectively. This process is almost identical to the existing
`MSCATTER`/`MGATHER` support.

One extra change was made to `SelectionDAGLegalize` so that
`VP_SCATTER`'s operation action is derived from its stored "value"
operand rather than its return type (which is always the chain).

Reviewed By: craig.topper, rogfer01

Differential Revision: https://reviews.llvm.org/D108987

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
    llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 2732c961c28fc..9518fef286e16 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1164,6 +1164,11 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
     Action = TLI.getOperationAction(Node->getOpcode(),
                     cast<MaskedStoreSDNode>(Node)->getValue().getValueType());
     break;
+  case ISD::VP_SCATTER:
+    Action = TLI.getOperationAction(
+        Node->getOpcode(),
+        cast<VPScatterSDNode>(Node)->getValue().getValueType());
+    break;
   case ISD::VECREDUCE_FADD:
   case ISD::VECREDUCE_FMUL:
   case ISD::VECREDUCE_ADD:

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 94b1017311c65..a1e1912913594 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -557,6 +557,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
 
+      setOperationAction(ISD::VP_GATHER, VT, Custom);
+      setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
@@ -621,6 +624,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
 
+      setOperationAction(ISD::VP_GATHER, VT, Custom);
+      setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
       setOperationAction(ISD::SELECT, VT, Custom);
       setOperationAction(ISD::SELECT_CC, VT, Expand);
 
@@ -730,6 +736,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::MSTORE, VT, Custom);
         setOperationAction(ISD::MGATHER, VT, Custom);
         setOperationAction(ISD::MSCATTER, VT, Custom);
+
+        setOperationAction(ISD::VP_GATHER, VT, Custom);
+        setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
         setOperationAction(ISD::ADD, VT, Custom);
         setOperationAction(ISD::MUL, VT, Custom);
         setOperationAction(ISD::SUB, VT, Custom);
@@ -805,6 +815,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::MSTORE, VT, Custom);
         setOperationAction(ISD::MGATHER, VT, Custom);
         setOperationAction(ISD::MSCATTER, VT, Custom);
+
+        setOperationAction(ISD::VP_GATHER, VT, Custom);
+        setOperationAction(ISD::VP_SCATTER, VT, Custom);
+
         setOperationAction(ISD::FADD, VT, Custom);
         setOperationAction(ISD::FSUB, VT, Custom);
         setOperationAction(ISD::FMUL, VT, Custom);
@@ -873,6 +887,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setTargetDAGCombine(ISD::FCOPYSIGN);
     setTargetDAGCombine(ISD::MGATHER);
     setTargetDAGCombine(ISD::MSCATTER);
+    setTargetDAGCombine(ISD::VP_GATHER);
+    setTargetDAGCombine(ISD::VP_SCATTER);
     setTargetDAGCombine(ISD::SRA);
     setTargetDAGCombine(ISD::SRL);
     setTargetDAGCombine(ISD::SHL);
@@ -2714,9 +2730,11 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::FCOPYSIGN:
     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
   case ISD::MGATHER:
-    return lowerMGATHER(Op, DAG);
+  case ISD::VP_GATHER:
+    return lowerMaskedGather(Op, DAG);
   case ISD::MSCATTER:
-    return lowerMSCATTER(Op, DAG);
+  case ISD::VP_SCATTER:
+    return lowerMaskedScatter(Op, DAG);
   case ISD::FLT_ROUNDS_:
     return lowerGET_ROUNDING(Op, DAG);
   case ISD::SET_ROUNDING:
@@ -4688,36 +4706,56 @@ SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
 }
 
-// Custom lower MGATHER to a legalized form for RVV. It will then be matched to
-// a RVV indexed load. The RVV indexed load instructions only support the
-// "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
-// truncated to XLEN and are treated as byte offsets. Any signed or scaled
-// indexing is extended to the XLEN value type and scaled accordingly.
-SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
-  auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
+// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
+// matched to a RVV indexed load. The RVV indexed load instructions only
+// support the "unsigned unscaled" addressing mode; indices are implicitly
+// zero-extended or truncated to XLEN and are treated as byte offsets. Any
+// signed or scaled indexing is extended to the XLEN value type and scaled
+// accordingly.
+SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
+                                               SelectionDAG &DAG) const {
   SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
 
-  SDValue Index = MGN->getIndex();
-  SDValue Mask = MGN->getMask();
-  SDValue PassThru = MGN->getPassThru();
+  const auto *MemSD = cast<MemSDNode>(Op.getNode());
+  EVT MemVT = MemSD->getMemoryVT();
+  MachineMemOperand *MMO = MemSD->getMemOperand();
+  SDValue Chain = MemSD->getChain();
+  SDValue BasePtr = MemSD->getBasePtr();
+
+  ISD::LoadExtType LoadExtType;
+  SDValue Index, Mask, PassThru, VL;
+
+  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
+    Index = VPGN->getIndex();
+    Mask = VPGN->getMask();
+    PassThru = DAG.getUNDEF(VT);
+    VL = VPGN->getVectorLength();
+    // VP doesn't support extending loads.
+    LoadExtType = ISD::NON_EXTLOAD;
+  } else {
+    // Else it must be a MGATHER.
+    auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
+    Index = MGN->getIndex();
+    Mask = MGN->getMask();
+    PassThru = MGN->getPassThru();
+    LoadExtType = MGN->getExtensionType();
+  }
 
-  MVT VT = Op.getSimpleValueType();
   MVT IndexVT = Index.getSimpleValueType();
   MVT XLenVT = Subtarget.getXLenVT();
 
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Unexpected VTs!");
-  assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
-         "Unexpected pointer type");
+  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
   // Targets have to explicitly opt-in for extending vector loads.
-  assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
-         "Unexpected extending MGATHER");
+  assert(LoadExtType == ISD::NON_EXTLOAD &&
+         "Unexpected extending MGATHER/VP_GATHER");
 
   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
   // the selection of the masked intrinsics doesn't do this for us.
   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
 
-  SDValue VL;
   MVT ContainerVT = VT;
   if (VT.isFixedLengthVector()) {
     // We need to use the larger of the result and index type to determine the
@@ -4740,18 +4778,17 @@ SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
     }
+  }
 
-    VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
-  } else
-    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
-  SmallVector<SDValue, 8> Ops{MGN->getChain(),
-                              DAG.getTargetConstant(IntID, DL, XLenVT)};
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
   if (!IsUnmasked)
     Ops.push_back(PassThru);
-  Ops.push_back(MGN->getBasePtr());
+  Ops.push_back(BasePtr);
   Ops.push_back(Index);
   if (!IsUnmasked)
     Ops.push_back(Mask);
@@ -4759,9 +4796,8 @@ SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
 
   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
   SDValue Result =
-      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
-                              MGN->getMemoryVT(), MGN->getMemOperand());
-  SDValue Chain = Result.getValue(1);
+      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
+  Chain = Result.getValue(1);
 
   if (VT.isFixedLengthVector())
     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
@@ -4769,18 +4805,39 @@ SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
   return DAG.getMergeValues({Result, Chain}, DL);
 }
 
-// Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
-// a RVV indexed store. The RVV indexed store instructions only support the
-// "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
-// truncated to XLEN and are treated as byte offsets. Any signed or scaled
-// indexing is extended to the XLEN value type and scaled accordingly.
-SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
-                                           SelectionDAG &DAG) const {
-  auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
+// Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
+// matched to a RVV indexed store. The RVV indexed store instructions only
+// support the "unsigned unscaled" addressing mode; indices are implicitly
+// zero-extended or truncated to XLEN and are treated as byte offsets. Any
+// signed or scaled indexing is extended to the XLEN value type and scaled
+// accordingly.
+SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
+                                                SelectionDAG &DAG) const {
   SDLoc DL(Op);
-  SDValue Index = MSN->getIndex();
-  SDValue Mask = MSN->getMask();
-  SDValue Val = MSN->getValue();
+  const auto *MemSD = cast<MemSDNode>(Op.getNode());
+  EVT MemVT = MemSD->getMemoryVT();
+  MachineMemOperand *MMO = MemSD->getMemOperand();
+  SDValue Chain = MemSD->getChain();
+  SDValue BasePtr = MemSD->getBasePtr();
+
+  bool IsTruncatingStore = false;
+  SDValue Index, Mask, Val, VL;
+
+  if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
+    Index = VPSN->getIndex();
+    Mask = VPSN->getMask();
+    Val = VPSN->getValue();
+    VL = VPSN->getVectorLength();
+    // VP doesn't support truncating stores.
+    IsTruncatingStore = false;
+  } else {
+    // Else it must be a MSCATTER.
+    auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
+    Index = MSN->getIndex();
+    Mask = MSN->getMask();
+    Val = MSN->getValue();
+    IsTruncatingStore = MSN->isTruncatingStore();
+  }
 
   MVT VT = Val.getSimpleValueType();
   MVT IndexVT = Index.getSimpleValueType();
@@ -4788,21 +4845,19 @@ SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
 
   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
          "Unexpected VTs!");
-  assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
-         "Unexpected pointer type");
+  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
   // Targets have to explicitly opt-in for extending vector loads and
   // truncating vector stores.
-  assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
+  assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
 
   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
   // the selection of the masked intrinsics doesn't do this for us.
   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
 
-  SDValue VL;
+  MVT ContainerVT = VT;
   if (VT.isFixedLengthVector()) {
     // We need to use the larger of the value and index type to determine the
     // scalable type to use so we don't increase LMUL for any operand/result.
-    MVT ContainerVT;
     if (VT.bitsGE(IndexVT)) {
       ContainerVT = getContainerForFixedLengthVector(VT);
       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
@@ -4821,24 +4876,23 @@ SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
     }
+  }
 
-    VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
-  } else
-    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
+  if (!VL)
+    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
-  SmallVector<SDValue, 8> Ops{MSN->getChain(),
-                              DAG.getTargetConstant(IntID, DL, XLenVT)};
+  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
   Ops.push_back(Val);
-  Ops.push_back(MSN->getBasePtr());
+  Ops.push_back(BasePtr);
   Ops.push_back(Index);
   if (!IsUnmasked)
     Ops.push_back(Mask);
   Ops.push_back(VL);
 
-  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
-                                 MSN->getMemoryVT(), MSN->getMemOperand());
+  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
+                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
 }
 
 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
@@ -6336,18 +6390,33 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
   }
   case ISD::MGATHER:
-  case ISD::MSCATTER: {
+  case ISD::MSCATTER:
+  case ISD::VP_GATHER:
+  case ISD::VP_SCATTER: {
     if (!DCI.isBeforeLegalize())
       break;
-    MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
-    SDValue Index = MGSN->getIndex();
+    SDValue Index, ScaleOp;
+    bool IsIndexScaled = false;
+    bool IsIndexSigned = false;
+    if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
+      Index = VPGSN->getIndex();
+      ScaleOp = VPGSN->getScale();
+      IsIndexScaled = VPGSN->isIndexScaled();
+      IsIndexSigned = VPGSN->isIndexSigned();
+    } else {
+      const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
+      Index = MGSN->getIndex();
+      ScaleOp = MGSN->getScale();
+      IsIndexScaled = MGSN->isIndexScaled();
+      IsIndexSigned = MGSN->isIndexSigned();
+    }
     EVT IndexVT = Index.getValueType();
     MVT XLenVT = Subtarget.getXLenVT();
     // RISCV indexed loads only support the "unsigned unscaled" addressing
     // mode, so anything else must be manually legalized.
-    bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
-                                (MGSN->isIndexSigned() &&
-                                 IndexVT.getVectorElementType().bitsLT(XLenVT));
+    bool NeedsIdxLegalization =
+        IsIndexScaled ||
+        (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
     if (!NeedsIdxLegalization)
       break;
 
@@ -6356,36 +6425,48 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     // Any index legalization should first promote to XLenVT, so we don't lose
     // bits when scaling. This may create an illegal index type so we let
     // LLVM's legalization take care of the splitting.
+    // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
       IndexVT = IndexVT.changeVectorElementType(XLenVT);
-      Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
-                                                : ISD::ZERO_EXTEND,
+      Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                           DL, IndexVT, Index);
     }
 
-    unsigned Scale = N->getConstantOperandVal(5);
-    if (MGSN->isIndexScaled() && Scale != 1) {
+    unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
+    if (IsIndexScaled && Scale != 1) {
       // Manually scale the indices by the element size.
       // TODO: Sanitize the scale operand here?
+      // TODO: For VP nodes, should we use VP_SHL here?
       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
     }
 
     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
-    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
+    if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
+      return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
+                             {VPGN->getChain(), VPGN->getBasePtr(), Index,
+                              VPGN->getScale(), VPGN->getMask(),
+                              VPGN->getVectorLength()},
+                             VPGN->getMemOperand(), NewIndexTy);
+    if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
+      return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
+                              {VPSN->getChain(), VPSN->getValue(),
+                               VPSN->getBasePtr(), Index, VPSN->getScale(),
+                               VPSN->getMask(), VPSN->getVectorLength()},
+                              VPSN->getMemOperand(), NewIndexTy);
+    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
       return DAG.getMaskedGather(
-          N->getVTList(), MGSN->getMemoryVT(), DL,
-          {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
-           MGSN->getBasePtr(), Index, MGN->getScale()},
+          N->getVTList(), MGN->getMemoryVT(), DL,
+          {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
+           MGN->getBasePtr(), Index, MGN->getScale()},
           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
-    }
     const auto *MSN = cast<MaskedScatterSDNode>(N);
     return DAG.getMaskedScatter(
-        N->getVTList(), MGSN->getMemoryVT(), DL,
-        {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
-         Index, MGSN->getScale()},
-        MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
+        N->getVTList(), MSN->getMemoryVT(), DL,
+        {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
+         Index, MSN->getScale()},
+        MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
   }
   case RISCVISD::SRA_VL:
   case RISCVISD::SRL_VL:

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 06f4235aeb922..e3b616b2335d8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -568,8 +568,8 @@ class RISCVTargetLowering : public TargetLowering {
   SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                                SelectionDAG &DAG) const;
-  SDValue lowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
-  SDValue lowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
new file mode 100644
index 0000000000000..d19f2aa1e3d8d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -0,0 +1,1680 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*>, <2 x i1>, i32)
+
+define <2 x i8> @vpgather_v2i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret <2 x i8> %v
+}
+
+define <2 x i16> @vpgather_v2i8_sextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i8_sextload_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV32-NEXT:    vsext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i8_sextload_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV64-NEXT:    vsext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = sext <2 x i8> %v to <2 x i16>
+  ret <2 x i16> %ev
+}
+
+define <2 x i16> @vpgather_v2i8_zextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i8_zextload_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV32-NEXT:    vzext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i8_zextload_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV64-NEXT:    vzext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = zext <2 x i8> %v to <2 x i16>
+  ret <2 x i16> %ev
+}
+
+define <2 x i32> @vpgather_v2i8_sextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i8_sextload_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i8_sextload_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = sext <2 x i8> %v to <2 x i32>
+  ret <2 x i32> %ev
+}
+
+define <2 x i32> @vpgather_v2i8_zextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i8_zextload_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vzext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i8_zextload_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vzext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = zext <2 x i8> %v to <2 x i32>
+  ret <2 x i32> %ev
+}
+
+define <2 x i64> @vpgather_v2i8_sextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i8_sextload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vsext.vf8 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i8_sextload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vsext.vf8 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = sext <2 x i8> %v to <2 x i64>
+  ret <2 x i64> %ev
+}
+
+define <2 x i64> @vpgather_v2i8_zextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i8_zextload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vzext.vf8 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i8_zextload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vzext.vf8 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = zext <2 x i8> %v to <2 x i64>
+  ret <2 x i64> %ev
+}
+
+declare <4 x i8> @llvm.vp.gather.v4i8.v4p0i8(<4 x i8*>, <4 x i1>, i32)
+
+define <4 x i8> @vpgather_v4i8(<4 x i8*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <4 x i8> @llvm.vp.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret <4 x i8> %v
+}
+
+define <4 x i8> @vpgather_truemask_v4i8(<4 x i8*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_v4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_v4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret <4 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*>, <8 x i1>, i32)
+
+define <8 x i8> @vpgather_v8i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i8> %v
+}
+
+define <8 x i8> @vpgather_baseidx_v8i8(i8* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs
+  %v = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*>, <2 x i1>, i32)
+
+define <2 x i16> @vpgather_v2i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret <2 x i16> %v
+}
+
+define <2 x i32> @vpgather_v2i16_sextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i16_sextload_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vsext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i16_sextload_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vsext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = sext <2 x i16> %v to <2 x i32>
+  ret <2 x i32> %ev
+}
+
+define <2 x i32> @vpgather_v2i16_zextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i16_zextload_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vzext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i16_zextload_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vzext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = zext <2 x i16> %v to <2 x i32>
+  ret <2 x i32> %ev
+}
+
+define <2 x i64> @vpgather_v2i16_sextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i16_sextload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vsext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i16_sextload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vsext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = sext <2 x i16> %v to <2 x i64>
+  ret <2 x i64> %ev
+}
+
+define <2 x i64> @vpgather_v2i16_zextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i16_zextload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vzext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i16_zextload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vzext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = zext <2 x i16> %v to <2 x i64>
+  ret <2 x i64> %ev
+}
+
+declare <4 x i16> @llvm.vp.gather.v4i16.v4p0i16(<4 x i16*>, <4 x i1>, i32)
+
+define <4 x i16> @vpgather_v4i16(<4 x i16*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <4 x i16> @llvm.vp.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret <4 x i16> %v
+}
+
+define <4 x i16> @vpgather_truemask_v4i16(<4 x i16*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_v4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_v4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*>, <8 x i1>, i32)
+
+define <8 x i16> @vpgather_v8i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @vpgather_baseidx_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i8_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i8_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs
+  %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @vpgather_baseidx_sext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs
+  %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @vpgather_baseidx_zext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs
+  %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @vpgather_baseidx_v8i16(i16* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs
+  %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*>, <2 x i1>, i32)
+
+define <2 x i32> @vpgather_v2i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret <2 x i32> %v
+}
+
+define <2 x i64> @vpgather_v2i32_sextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i32_sextload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vsext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i32_sextload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vsext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = sext <2 x i32> %v to <2 x i64>
+  ret <2 x i64> %ev
+}
+
+define <2 x i64> @vpgather_v2i32_zextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i32_zextload_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vzext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i32_zextload_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vzext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl)
+  %ev = zext <2 x i32> %v to <2 x i64>
+  ret <2 x i64> %ev
+}
+
+declare <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*>, <4 x i1>, i32)
+
+define <4 x i32> @vpgather_v4i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %v
+}
+
+define <4 x i32> @vpgather_truemask_v4i32(<4 x i32*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_v4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_v4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*>, <8 x i1>, i32)
+
+define <8 x i32> @vpgather_v8i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8, v0.t
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vpgather_baseidx_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i8_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i8_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vpgather_baseidx_sext_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vpgather_baseidx_zext_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vpgather_baseidx_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i16_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i16_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vpgather_baseidx_sext_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vpgather_baseidx_zext_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+define <8 x i32> @vpgather_baseidx_v8i32(i32* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v8, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs
+  %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.gather.v2i64.v2p0i64(<2 x i64*>, <2 x i1>, i32)
+
+define <2 x i64> @vpgather_v2i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <2 x i64> @llvm.vp.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*>, <4 x i1>, i32)
+
+define <4 x i64> @vpgather_v4i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8, v0.t
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret <4 x i64> %v
+}
+
+define <4 x i64> @vpgather_truemask_v4i64(<4 x i64*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*>, <8 x i1>, i32)
+
+define <8 x i64> @vpgather_v8i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v28, (zero), v8, v0.t
+; RV32-NEXT:    vmv4r.v v8, v28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i8_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i8_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_v8i8_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf8 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_v8i8_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf8 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i16_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i16_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_v8i16_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf4 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_v8i16_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf4 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i32_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v8, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i32_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_v8i32_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf2 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_v8i32_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf2 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsll.vi v28, v8, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs
+  %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x i64> %v
+}
+
+declare <2 x half> @llvm.vp.gather.v2f16.v2p0f16(<2 x half*>, <2 x i1>, i32)
+
+define <2 x half> @vpgather_v2f16(<2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x half> @llvm.vp.gather.v2f16.v2p0f16(<2 x half*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret <2 x half> %v
+}
+
+declare <4 x half> @llvm.vp.gather.v4f16.v4p0f16(<4 x half*>, <4 x i1>, i32)
+
+define <4 x half> @vpgather_v4f16(<4 x half*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <4 x half> @llvm.vp.gather.v4f16.v4p0f16(<4 x half*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret <4 x half> %v
+}
+
+define <4 x half> @vpgather_truemask_v4f16(<4 x half*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %v = call <4 x half> @llvm.vp.gather.v4f16.v4p0f16(<4 x half*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret <4 x half> %v
+}
+
+declare <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*>, <8 x i1>, i32)
+
+define <8 x half> @vpgather_v8f16(<8 x half*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %v
+}
+
+define <8 x half> @vpgather_baseidx_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i8_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i8_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs
+  %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %v
+}
+
+define <8 x half> @vpgather_baseidx_sext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs
+  %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %v
+}
+
+define <8 x half> @vpgather_baseidx_zext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs
+  %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %v
+}
+
+define <8 x half> @vpgather_baseidx_v8f16(half* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs
+  %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %v
+}
+
+declare <2 x float> @llvm.vp.gather.v2f32.v2p0f32(<2 x float*>, <2 x i1>, i32)
+
+define <2 x float> @vpgather_v2f32(<2 x float*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <2 x float> @llvm.vp.gather.v2f32.v2p0f32(<2 x float*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret <2 x float> %v
+}
+
+declare <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*>, <4 x i1>, i32)
+
+define <4 x float> @vpgather_v4f32(<4 x float*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+define <4 x float> @vpgather_truemask_v4f32(<4 x float*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*>, <8 x i1>, i32)
+
+define <8 x float> @vpgather_v8f32(<8 x float*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8, v0.t
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vpgather_baseidx_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i8_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i8_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vpgather_baseidx_sext_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vpgather_baseidx_zext_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vpgather_baseidx_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i16_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i16_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vpgather_baseidx_sext_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vpgather_baseidx_zext_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+define <8 x float> @vpgather_baseidx_v8f32(float* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v8, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs
+  %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+declare <2 x double> @llvm.vp.gather.v2f64.v2p0f64(<2 x double*>, <2 x i1>, i32)
+
+define <2 x double> @vpgather_v2f64(<2 x double*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <2 x double> @llvm.vp.gather.v2f64.v2p0f64(<2 x double*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret <2 x double> %v
+}
+
+declare <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*>, <4 x i1>, i32)
+
+define <4 x double> @vpgather_v4f64(<4 x double*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8, v0.t
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret <4 x double> %v
+}
+
+define <4 x double> @vpgather_truemask_v4f64(<4 x double*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret <4 x double> %v
+}
+
+declare <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*>, <8 x i1>, i32)
+
+define <8 x double> @vpgather_v8f64(<8 x double*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v28, (zero), v8, v0.t
+; RV32-NEXT:    vmv4r.v v8, v28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i8_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i8_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_v8i8_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf8 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_v8i8_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf8 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i16_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v8
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i16_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_v8i16_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf4 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_v8i16_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf4 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_v8i32_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v8, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_v8i32_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v8
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_v8i32_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf2 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_v8i32_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf2 v28, v8
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}
+
+define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsll.vi v28, v8, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs
+  %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret <8 x double> %v
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
new file mode 100644
index 0000000000000..1a4ffa0fb62ef
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -0,0 +1,1530 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8>, <2 x i8*>, <2 x i1>, i32)
+
+define void @vpscatter_v2i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <2 x i16> %val to <2 x i8>
+  call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <2 x i32> %val to <2 x i8>
+  call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <2 x i64> %val to <2 x i8>
+  call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, <4 x i1>, i32)
+
+define void @vpscatter_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_v4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_v4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8>, <8 x i8*>, <8 x i1>, i32)
+
+define void @vpscatter_v8i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i8(<8 x i8> %val, i8* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v9
+; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v9
+; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs
+  call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16>, <2 x i16*>, <2 x i1>, i32)
+
+define void @vpscatter_v2i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <2 x i32> %val to <2 x i16>
+  call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <2 x i64> %val to <2 x i16>
+  call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, <4 x i1>, i32)
+
+define void @vpscatter_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_v4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_v4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, <8 x i1>, i32)
+
+define void @vpscatter_v8i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i8_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i8_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs
+  call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs
+  call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs
+  call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i16(<8 x i16> %val, i16* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs
+  call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32>, <2 x i32*>, <2 x i1>, i32)
+
+define void @vpscatter_v2i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <2 x i64> %val to <2 x i32>
+  call void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32> %tval, <2 x i32*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, <4 x i1>, i32)
+
+define void @vpscatter_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_v4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_v4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32>, <8 x i32*>, <8 x i1>, i32)
+
+define void @vpscatter_v8i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i8_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i8_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i16_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i16_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf2 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf4 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i32(<8 x i32> %val, i32* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v10, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs
+  call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v2i64.v2p0i64(<2 x i64>, <2 x i64*>, <2 x i1>, i32)
+
+define void @vpscatter_v2i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v2i64.v2p0i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64>, <4 x i64*>, <4 x i1>, i32)
+
+define void @vpscatter_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_v4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_v4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64>, <8 x i64*>, <8 x i1>, i32)
+
+define void @vpscatter_v8i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i8_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v12
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i8_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v12
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_v8i8_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf8 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_v8i8_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf8 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i16_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v12
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i16_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v12
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_v8i16_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf4 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_v8i16_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf4 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i32_v8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v12, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i32_v8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v12
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_v8i32_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf2 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_v8i32_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf2 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsll.vi v28, v12, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs
+  call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v2f16.v2p0f16(<2 x half>, <2 x half*>, <2 x i1>, i32)
+
+define void @vpscatter_v2f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v2f16.v2p0f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, <4 x i1>, i32)
+
+define void @vpscatter_v4f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_v4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_v4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half>, <8 x half*>, <8 x i1>, i32)
+
+define void @vpscatter_v8f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i8_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i8_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs
+  call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs
+  call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs
+  call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8f16(<8 x half> %val, half* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v9
+; RV32-NEXT:    vadd.vv v26, v26, v26
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v9
+; RV64-NEXT:    vadd.vv v28, v28, v28
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs
+  call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v2f32.v2p0f32(<2 x float>, <2 x float*>, <2 x i1>, i32)
+
+define void @vpscatter_v2f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v2f32.v2p0f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, <4 x i1>, i32)
+
+define void @vpscatter_v4f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_v4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_v4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float>, <8 x float*>, <8 x i1>, i32)
+
+define void @vpscatter_v8f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i8_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i8_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf8 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i16_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i16_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vzext.vf2 v26, v10
+; RV32-NEXT:    vsll.vi v26, v26, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vzext.vf4 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8f32(<8 x float> %val, float* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v10, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v10
+; RV64-NEXT:    vsll.vi v28, v28, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs
+  call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v2f64.v2p0f64(<2 x double>, <2 x double*>, <2 x i1>, i32)
+
+define void @vpscatter_v2f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double>, <4 x double*>, <4 x i1>, i32)
+
+define void @vpscatter_v4f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10
+; RV64-NEXT:    ret
+  %mhead = insertelement <4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double>, <8 x double*>, <8 x i1>, i32)
+
+define void @vpscatter_v8f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i8_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v26, v12
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i8_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf8 v28, v12
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_v8i8_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf8 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_v8i8_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf8 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i8> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i16_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v26, v12
+; RV32-NEXT:    vsll.vi v26, v26, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i16_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf4 v28, v12
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_v8i16_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf4 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_v8i16_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf4 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i16> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_v8i32_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; RV32-NEXT:    vsll.vi v26, v12, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_v8i32_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vsext.vf2 v28, v12
+; RV64-NEXT:    vsll.vi v28, v28, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_v8i32_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsext.vf2 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_v8i32_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vzext.vf2 v28, v12
+; CHECK-NEXT:    vsll.vi v28, v28, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <8 x i32> %idxs to <8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT:    vsll.vi v28, v12, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs
+  call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
new file mode 100644
index 0000000000000..d383790954c12
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -0,0 +1,1816 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.vp.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vpgather_nxv1i8(<vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv1i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv1i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i8> @llvm.vp.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vpgather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i16> @vpgather_nxv2i8_sextload_nxv2i16(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vsext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vsext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %ev
+}
+
+define <vscale x 2 x i16> @vpgather_nxv2i8_zextload_nxv2i16(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vzext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vzext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %ev
+}
+
+define <vscale x 2 x i32> @vpgather_nxv2i8_sextload_nxv2i32(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %ev
+}
+
+define <vscale x 2 x i32> @vpgather_nxv2i8_zextload_nxv2i32(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vzext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vzext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %ev
+}
+
+define <vscale x 2 x i64> @vpgather_nxv2i8_sextload_nxv2i64(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vsext.vf8 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vsext.vf8 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ev
+}
+
+define <vscale x 2 x i64> @vpgather_nxv2i8_zextload_nxv2i64(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vzext.vf8 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vzext.vf8 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.gather.nxv2i8.nxv2p0i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ev
+}
+
+declare <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0i8(<vscale x 4 x i8*>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vpgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0i8(<vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vpgather_truemask_nxv4i8(<vscale x 4 x i8*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_nxv4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_nxv4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0i8(<vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.gather.nxv8i8.nxv8p0i8(<vscale x 8 x i8*>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vpgather_nxv8i8(<vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x i8> @llvm.vp.gather.nxv8i8.nxv8p0i8(<vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vpgather_baseidx_nxv8i8(i8* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i8, i8* %base, <vscale x 8 x i8> %idxs
+  %v = call <vscale x 8 x i8> @llvm.vp.gather.nxv8i8.nxv8p0i8(<vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.gather.nxv1i16.nxv1p0i16(<vscale x 1 x i16*>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vpgather_nxv1i16(<vscale x 1 x i16*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i16> @llvm.vp.gather.nxv1i16.nxv1p0i16(<vscale x 1 x i16*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vpgather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i32> @vpgather_nxv2i16_sextload_nxv2i32(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = sext <vscale x 2 x i16> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %ev
+}
+
+define <vscale x 2 x i32> @vpgather_nxv2i16_zextload_nxv2i32(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vzext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vzext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = zext <vscale x 2 x i16> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %ev
+}
+
+define <vscale x 2 x i64> @vpgather_nxv2i16_sextload_nxv2i64(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vsext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vsext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = sext <vscale x 2 x i16> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ev
+}
+
+define <vscale x 2 x i64> @vpgather_nxv2i16_zextload_nxv2i64(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vzext.vf4 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vzext.vf4 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.gather.nxv2i16.nxv2p0i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = zext <vscale x 2 x i16> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ev
+}
+
+declare <vscale x 4 x i16> @llvm.vp.gather.nxv4i16.nxv4p0i16(<vscale x 4 x i16*>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vpgather_nxv4i16(<vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x i16> @llvm.vp.gather.nxv4i16.nxv4p0i16(<vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vpgather_truemask_nxv4i16(<vscale x 4 x i16*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_nxv4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_nxv4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i16> @llvm.vp.gather.nxv4i16.nxv4p0i16(<vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vpgather_nxv8i16(<vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8, v0.t
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8, v0.t
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vpgather_baseidx_nxv8i8_nxv8i16(i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i8> %idxs
+  %v = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vpgather_baseidx_sext_nxv8i8_nxv8i16(i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %eidxs
+  %v = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vpgather_baseidx_zext_nxv8i8_nxv8i16(i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %eidxs
+  %v = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vpgather_baseidx_nxv8i16(i16* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %idxs
+  %v = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0i16(<vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.gather.nxv1i32.nxv1p0i32(<vscale x 1 x i32*>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vpgather_nxv1i32(<vscale x 1 x i32*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i32> @llvm.vp.gather.nxv1i32.nxv1p0i32(<vscale x 1 x i32*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0i32(<vscale x 2 x i32*>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vpgather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i64> @vpgather_nxv2i32_sextload_nxv2i64(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i32_sextload_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vsext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i32_sextload_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vsext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ev
+}
+
+define <vscale x 2 x i64> @vpgather_nxv2i32_zextload_nxv2i64(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i32_zextload_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vzext.vf2 v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i32_zextload_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vzext.vf2 v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  %ev = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ev
+}
+
+declare <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0i32(<vscale x 4 x i32*>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vpgather_nxv4i32(<vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8, v0.t
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0i32(<vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vpgather_truemask_nxv4i32(<vscale x 4 x i32*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0i32(<vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vpgather_nxv8i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v28, (zero), v8, v0.t
+; RV64-NEXT:    vmv4r.v v8, v28
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vpgather_baseidx_nxv8i8_nxv8i32(i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i8> %idxs
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i8_nxv8i32(i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i8_nxv8i32(i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vpgather_baseidx_nxv8i16_nxv8i32(i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i16> %idxs
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i16_nxv8i32(i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i16_nxv8i32(i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vpgather_baseidx_nxv8i32(i32* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v8, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %idxs
+  %v = call <vscale x 8 x i32> @llvm.vp.gather.nxv8i32.nxv8p0i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.gather.nxv1i64.nxv1p0i64(<vscale x 1 x i64*>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vpgather_nxv1i64(<vscale x 1 x i64*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i64> @llvm.vp.gather.nxv1i64.nxv1p0i64(<vscale x 1 x i64*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0i64(<vscale x 2 x i64*>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vpgather_nxv2i64(<vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8, v0.t
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0i64(<vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0i64(<vscale x 4 x i64*>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vpgather_nxv4i64(<vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v28, (zero), v8, v0.t
+; RV32-NEXT:    vmv4r.v v8, v28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0i64(<vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vpgather_truemask_nxv4i64(<vscale x 4 x i64*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v28, (zero), v8
+; RV32-NEXT:    vmv4r.v v8, v28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0i64(<vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vpgather_nxv8i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v16, (zero), v8, v0.t
+; RV32-NEXT:    vmv8r.v v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_nxv8i8_nxv8i64(i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v8, v16, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i8> %idxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf8 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf8 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_nxv8i16_nxv8i64(i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v8, v16, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i16> %idxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf4 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf4 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_nxv8i32_nxv8i64(i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v8, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v16, v8
+; RV64-NEXT:    vsll.vi v8, v16, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i32> %idxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf2 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf2 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vpgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsll.vi v8, v8, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %idxs
+  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %v
+}
+
+declare <vscale x 1 x half> @llvm.vp.gather.nxv1f16.nxv1p0f16(<vscale x 1 x half*>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @vpgather_nxv1f16(<vscale x 1 x half*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv1f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x half> @llvm.vp.gather.nxv1f16.nxv1p0f16(<vscale x 1 x half*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x half> %v
+}
+
+declare <vscale x 2 x half> @llvm.vp.gather.nxv2f16.nxv2p0f16(<vscale x 2 x half*>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @vpgather_nxv2f16(<vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x half> @llvm.vp.gather.nxv2f16.nxv2p0f16(<vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x half> %v
+}
+
+declare <vscale x 4 x half> @llvm.vp.gather.nxv4f16.nxv4p0f16(<vscale x 4 x half*>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @vpgather_nxv4f16(<vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x half> @llvm.vp.gather.nxv4f16.nxv4p0f16(<vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x half> %v
+}
+
+define <vscale x 4 x half> @vpgather_truemask_nxv4f16(<vscale x 4 x half*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_nxv4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_nxv4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.gather.nxv4f16.nxv4p0f16(<vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret <vscale x 4 x half> %v
+}
+
+declare <vscale x 8 x half> @llvm.vp.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @vpgather_nxv8f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8, v0.t
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8, v0.t
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x half> @llvm.vp.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+define <vscale x 8 x half> @vpgather_baseidx_nxv8i8_nxv8f16(half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i8> %idxs
+  %v = call <vscale x 8 x half> @llvm.vp.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+define <vscale x 8 x half> @vpgather_baseidx_sext_nxv8i8_nxv8f16(half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
+  %v = call <vscale x 8 x half> @llvm.vp.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+define <vscale x 8 x half> @vpgather_baseidx_zext_nxv8i8_nxv8f16(half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
+  %v = call <vscale x 8 x half> @llvm.vp.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+define <vscale x 8 x half> @vpgather_baseidx_nxv8f16(half* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %idxs
+  %v = call <vscale x 8 x half> @llvm.vp.gather.nxv8f16.nxv8p0f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x half> %v
+}
+
+declare <vscale x 1 x float> @llvm.vp.gather.nxv1f32.nxv1p0f32(<vscale x 1 x float*>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @vpgather_nxv1f32(<vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x float> @llvm.vp.gather.nxv1f32.nxv1p0f32(<vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.gather.nxv2f32.nxv2p0f32(<vscale x 2 x float*>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vpgather_nxv2f32(<vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v25, (zero), v8, v0.t
+; RV64-NEXT:    vmv1r.v v8, v25
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x float> @llvm.vp.gather.nxv2f32.nxv2p0f32(<vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 4 x float> @llvm.vp.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @vpgather_nxv4f32(<vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8, v0.t
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x float> @llvm.vp.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x float> %v
+}
+
+define <vscale x 4 x float> @vpgather_truemask_nxv4f32(<vscale x 4 x float*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_nxv4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v26, (zero), v8
+; RV64-NEXT:    vmv2r.v v8, v26
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret <vscale x 4 x float> %v
+}
+
+declare <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x float> @vpgather_nxv8f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (zero), v8, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v28, (zero), v8, v0.t
+; RV64-NEXT:    vmv4r.v v8, v28
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vpgather_baseidx_nxv8i8_nxv8f32(float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i8> %idxs
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i8_nxv8f32(float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i8_nxv8f32(float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vpgather_baseidx_nxv8i16_nxv8f32(float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i16> %idxs
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i16_nxv8f32(float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i16_nxv8f32(float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vpgather_baseidx_nxv8f32(float* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v8, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v16, v8
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %idxs
+  %v = call <vscale x 8 x float> @llvm.vp.gather.nxv8f32.nxv8p0f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x float> %v
+}
+
+declare <vscale x 1 x double> @llvm.vp.gather.nxv1f64.nxv1p0f64(<vscale x 1 x double*>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @vpgather_nxv1f64(<vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vluxei32.v v25, (zero), v8, v0.t
+; RV32-NEXT:    vmv1r.v v8, v25
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x double> @llvm.vp.gather.nxv1f64.nxv1p0f64(<vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x double> %v
+}
+
+declare <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0f64(<vscale x 2 x double*>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vpgather_nxv2f64(<vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vluxei32.v v26, (zero), v8, v0.t
+; RV32-NEXT:    vmv2r.v v8, v26
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0f64(<vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x double> %v
+}
+
+declare <vscale x 4 x double> @llvm.vp.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x double> @vpgather_nxv4f64(<vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v28, (zero), v8, v0.t
+; RV32-NEXT:    vmv4r.v v8, v28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 4 x double> @llvm.vp.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x double> %v
+}
+
+define <vscale x 4 x double> @vpgather_truemask_nxv4f64(<vscale x 4 x double*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_truemask_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vluxei32.v v28, (zero), v8
+; RV32-NEXT:    vmv4r.v v8, v28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_truemask_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret <vscale x 4 x double> %v
+}
+
+declare <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x double> @vpgather_nxv8f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v16, (zero), v8, v0.t
+; RV32-NEXT:    vmv8r.v v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (zero), v8, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_nxv8i8_nxv8f64(double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsll.vi v8, v16, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i8> %idxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i8_nxv8f64(double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf8 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf8 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_nxv8i16_nxv8f64(double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v8
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v8
+; RV64-NEXT:    vsll.vi v8, v16, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i16> %idxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf4 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i16_nxv8f64(double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf4 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_nxv8i32_nxv8f64(double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v8, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vluxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v16, v8
+; RV64-NEXT:    vsll.vi v8, v16, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i32> %idxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i32_nxv8f64(double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf2 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i32_nxv8f64(double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf2 v16, v8
+; CHECK-NEXT:    vsll.vi v8, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vpgather_baseidx_nxv8f64(double* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpgather_baseidx_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsll.vi v8, v8, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %idxs
+  %v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x double> %v
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
new file mode 100644
index 0000000000000..c041c3411281d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -0,0 +1,1656 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare void @llvm.vp.scatter.nxv1i8.nxv1p0i8(<vscale x 1 x i8>, <vscale x 1 x i8*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv1i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv1i8.nxv1p0i8(<vscale x 1 x i8> %val, <vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8>, <vscale x 2 x i8*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
+  call void @llvm.vp.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %tval, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
+  call void @llvm.vp.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %tval, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
+  call void @llvm.vp.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %tval, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8>, <vscale x 4 x i8*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8i8.nxv8p0i8(<vscale x 8 x i8>, <vscale x 8 x i8*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv8i8.nxv8p0i8(<vscale x 8 x i8> %val, <vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v9
+; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v9
+; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i8, i8* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8i8.nxv8p0i8(<vscale x 8 x i8> %val, <vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1i16.nxv1p0i16(<vscale x 1 x i16>, <vscale x 1 x i16*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv1i16.nxv1p0i16(<vscale x 1 x i16> %val, <vscale x 1 x i16*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16> %val, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
+  call void @llvm.vp.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16> %tval, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v25, 0
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v25, 0
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
+  call void @llvm.vp.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16> %tval, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16>, <vscale x 4 x i16*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16>, <vscale x 8 x i16*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %eidxs
+  call void @llvm.vp.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %eidxs
+  call void @llvm.vp.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1i32.nxv1p0i32(<vscale x 1 x i32>, <vscale x 1 x i32*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv1i32.nxv1p0i32(<vscale x 1 x i32> %val, <vscale x 1 x i32*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2i32.nxv2p0i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv2i32.nxv2p0i32(<vscale x 2 x i32> %val, <vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT:    vnsrl.wi v25, v8, 0
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v25, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV64-NEXT:    vnsrl.wi v25, v8, 0
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v25, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  %tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
+  call void @llvm.vp.scatter.nxv2i32.nxv2p0i32(<vscale x 2 x i32> %tval, <vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32>, <vscale x 4 x i32*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32>, <vscale x 8 x i32*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf2 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf4 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v12, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %idxs
+  call void @llvm.vp.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1i64.nxv1p0i64(<vscale x 1 x i64>, <vscale x 1 x i64*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv1i64.nxv1p0i64(<vscale x 1 x i64> %val, <vscale x 1 x i64*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2i64.nxv2p0i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv2i64.nxv2p0i64(<vscale x 2 x i64> %val, <vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64>, <vscale x 4 x i64*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64>, <vscale x 8 x i64*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v16, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v16
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v24, v16
+; RV64-NEXT:    vsll.vi v16, v24, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf8 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf8 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v16
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v24, v16
+; RV64-NEXT:    vsll.vi v16, v24, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf4 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf4 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v16, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v24, v16
+; RV64-NEXT:    vsll.vi v16, v24, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i32> %idxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf2 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf2 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsll.vi v16, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %idxs
+  call void @llvm.vp.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1f16.nxv1p0f16(<vscale x 1 x half>, <vscale x 1 x half*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv1f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv1f16.nxv1p0f16(<vscale x 1 x half> %val, <vscale x 1 x half*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2f16.nxv2p0f16(<vscale x 2 x half>, <vscale x 2 x half*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv2f16.nxv2p0f16(<vscale x 2 x half> %val, <vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half>, <vscale x 4 x half*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half>, <vscale x 8 x half*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v10
+; RV32-NEXT:    vadd.vv v28, v28, v28
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v10
+; RV64-NEXT:    vadd.vv v16, v16, v16
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1f32.nxv1p0f32(<vscale x 1 x float>, <vscale x 1 x float*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv1f32.nxv1p0f32(<vscale x 1 x float> %val, <vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2f32.nxv2p0f32(<vscale x 2 x float>, <vscale x 2 x float*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv2f32.nxv2p0f32(<vscale x 2 x float> %val, <vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float>, <vscale x 4 x float*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float>, <vscale x 8 x float*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf4 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf8 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vzext.vf2 v28, v12
+; RV32-NEXT:    vsll.vi v28, v28, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vzext.vf4 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v12, 2
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v16, v12
+; RV64-NEXT:    vsll.vi v16, v16, 2
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %idxs
+  call void @llvm.vp.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv1f64.nxv1p0f64(<vscale x 1 x double>, <vscale x 1 x double*>, <vscale x 1 x i1>, i32)
+
+define void @vpscatter_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v9, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v9, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv1f64.nxv1p0f64(<vscale x 1 x double> %val, <vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv2f64.nxv2p0f64(<vscale x 2 x double>, <vscale x 2 x double*>, <vscale x 2 x i1>, i32)
+
+define void @vpscatter_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v10, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v10, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv2f64.nxv2p0f64(<vscale x 2 x double> %val, <vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double>, <vscale x 4 x double*>, <vscale x 4 x i1>, i32)
+
+define void @vpscatter_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_truemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_truemask_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_truemask_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v12
+; RV64-NEXT:    ret
+  %mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  call void @llvm.vp.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  ret void
+}
+
+declare void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double>, <vscale x 8 x double*>, <vscale x 8 x i1>, i32)
+
+define void @vpscatter_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (zero), v16, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (zero), v16, v0.t
+; RV64-NEXT:    ret
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf4 v28, v16
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v24, v16
+; RV64-NEXT:    vsll.vi v16, v24, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i8> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf8 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf8 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsext.vf2 v28, v16
+; RV32-NEXT:    vsll.vi v28, v28, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf4 v24, v16
+; RV64-NEXT:    vsll.vi v16, v24, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i16> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf4 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf4 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsll.vi v28, v16, 3
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf2 v24, v16
+; RV64-NEXT:    vsll.vi v16, v24, 3
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i32> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsext.vf2 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vzext.vf2 v24, v16
+; CHECK-NEXT:    vsll.vi v16, v24, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
+define void @vpscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpscatter_baseidx_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vsll.vi v16, v16, 3
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %idxs
+  call void @llvm.vp.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}

More information about the llvm-commits mailing list