[llvm] ff2622b - [RISCV] Optimize gather/scatter to unit-stride memop + shuffle (#66279)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 15 15:54:48 PDT 2023


Author: Philip Reames
Date: 2023-09-15T15:54:32-07:00
New Revision: ff2622b5ac6befc1f0814f83dabed8c04d7215ef

URL: https://github.com/llvm/llvm-project/commit/ff2622b5ac6befc1f0814f83dabed8c04d7215ef
DIFF: https://github.com/llvm/llvm-project/commit/ff2622b5ac6befc1f0814f83dabed8c04d7215ef.diff

LOG: [RISCV] Optimize gather/scatter to unit-stride memop + shuffle (#66279)

If we have a gather or a scatter whose index describes a permutation of the
lanes, we can lower this as a shuffle + a unit strided memory operation.  For
RISCV, this replaces an indexed load/store with a unit strided memory operation
and a vrgather (at worst).
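
As a rough IR-level sketch of the idea (the combine itself runs on the
MGATHER/MSCATTER nodes in SelectionDAG; the function names below are purely
illustrative), a gather with an all-true mask whose constant indices permute
the lanes is equivalent to a contiguous load followed by a shufflevector:

    ; Gather whose constant indices reverse the 8 lanes (compare the
    ; mgather_shuffle_reverse test below).
    declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>)

    define <8 x i16> @gather_lane_reverse(ptr %base) {
      %ptrs = getelementptr inbounds i16, ptr %base,
                <8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>
      %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2,
                <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>,
                <8 x i16> poison)
      ret <8 x i16> %v
    }

    ; Conceptually the same as a unit-stride load plus a lane shuffle, which is
    ; what the combine now forms (as a masked load + VECTOR_SHUFFLE):
    define <8 x i16> @gather_lane_reverse_equiv(ptr %base) {
      %w = load <8 x i16>, ptr %base, align 2
      %v = shufflevector <8 x i16> %w, <8 x i16> poison,
                <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
      ret <8 x i16> %v
    }

In the tests below, this reverse pattern then lowers to a single strided
vlse16.v with a negative stride, and the rotate pattern to a vle16.v plus
slides.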

I did not bother to implement the vp.scatter and vp.gather variants of these
transforms because they'd only be legal when EVL was VLMAX.  Given that, they
should have been transformed to the non-vp variants anyway.  I haven't checked
to see if they actually are.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7d28d52dd12e353..f5cc0ffb8fd01bc 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13527,6 +13527,40 @@ static bool legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index,
   return true;
 }
 
+/// Match the index vector of a scatter or gather node as the shuffle mask
+/// which performs the rearrangement if possible.  Will only match if
+/// all lanes are touched, and thus replacing the scatter or gather with
+/// a unit strided access and shuffle is legal.
+static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask,
+                                SmallVector<int> &ShuffleMask) {
+  if (!ISD::isConstantSplatVectorAllOnes(Mask.getNode()))
+    return false;
+  if (!ISD::isBuildVectorOfConstantSDNodes(Index.getNode()))
+    return false;
+
+  const unsigned ElementSize = VT.getScalarStoreSize();
+  const unsigned NumElems = VT.getVectorNumElements();
+
+  // Create the shuffle mask and check all bits active
+  assert(ShuffleMask.empty());
+  BitVector ActiveLanes(NumElems);
+  for (unsigned i = 0; i < Index->getNumOperands(); i++) {
+    // TODO: We've found an active bit of UB, and could be
+    // more aggressive here if desired.
+    if (Index->getOperand(i)->isUndef())
+      return false;
+    uint64_t C = Index->getConstantOperandVal(i);
+    if (C % ElementSize != 0)
+      return false;
+    C = C / ElementSize;
+    if (C >= NumElems)
+      return false;
+    ShuffleMask.push_back(C);
+    ActiveLanes.set(C);
+  }
+  return ActiveLanes.all();
+}
+
 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -13874,6 +13908,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
   }
   case ISD::MGATHER: {
     const auto *MGN = dyn_cast<MaskedGatherSDNode>(N);
+    const EVT VT = N->getValueType(0);
     SDValue Index = MGN->getIndex();
     SDValue ScaleOp = MGN->getScale();
     ISD::MemIndexType IndexType = MGN->getIndexType();
@@ -13894,6 +13929,19 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
            MGN->getBasePtr(), Index, ScaleOp},
           MGN->getMemOperand(), IndexType, MGN->getExtensionType());
+
+    SmallVector<int> ShuffleMask;
+    if (MGN->getExtensionType() == ISD::NON_EXTLOAD &&
+        matchIndexAsShuffle(VT, Index, MGN->getMask(), ShuffleMask)) {
+      SDValue Load = DAG.getMaskedLoad(VT, DL, MGN->getChain(),
+                                       MGN->getBasePtr(), DAG.getUNDEF(XLenVT),
+                                       MGN->getMask(), DAG.getUNDEF(VT),
+                                       MGN->getMemoryVT(), MGN->getMemOperand(),
+                                       ISD::UNINDEXED, ISD::NON_EXTLOAD);
+      SDValue Shuffle =
+        DAG.getVectorShuffle(VT, DL, Load, DAG.getUNDEF(VT), ShuffleMask);
+      return DAG.getMergeValues({Shuffle, Load.getValue(1)}, DL);
+    }
     break;
   }
   case ISD::MSCATTER:{
@@ -13918,6 +13966,18 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
           {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
            Index, ScaleOp},
           MSN->getMemOperand(), IndexType, MSN->isTruncatingStore());
+
+    EVT VT = MSN->getValue()->getValueType(0);
+    SmallVector<int> ShuffleMask;
+    if (!MSN->isTruncatingStore() &&
+        matchIndexAsShuffle(VT, Index, MSN->getMask(), ShuffleMask)) {
+      SDValue Shuffle = DAG.getVectorShuffle(VT, DL, MSN->getValue(),
+                                             DAG.getUNDEF(VT), ShuffleMask);
+      return DAG.getMaskedStore(MSN->getChain(), DL, Shuffle, MSN->getBasePtr(),
+                                DAG.getUNDEF(XLenVT), MSN->getMask(),
+                                MSN->getMemoryVT(), MSN->getMemOperand(),
+                                ISD::UNINDEXED, false);
+    }
     break;
   }
   case ISD::VP_GATHER: {

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 6c6ffe656f433b4..e19a878187e2c29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -13016,11 +13016,8 @@ define <4 x i32> @mgather_unit_stride_load(ptr %base) {
 ;
 ; RV64V-LABEL: mgather_unit_stride_load:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64V-NEXT:    vid.v v8
-; RV64V-NEXT:    vsll.vi v10, v8, 2
-; RV64V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64V-NEXT:    vluxei64.v v8, (a0), v10
+; RV64V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64V-NEXT:    vle32.v v8, (a0)
 ; RV64V-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_unit_stride_load:
@@ -13154,18 +13151,13 @@ define <4 x i32> @mgather_unit_stride_load_narrow_idx(ptr %base) {
 ; RV32-LABEL: mgather_unit_stride_load_narrow_idx:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vid.v v8
-; RV32-NEXT:    vsll.vi v8, v8, 2
-; RV32-NEXT:    vluxei32.v v8, (a0), v8
+; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64V-LABEL: mgather_unit_stride_load_narrow_idx:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64V-NEXT:    vid.v v8
-; RV64V-NEXT:    vsll.vi v10, v8, 2
-; RV64V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64V-NEXT:    vluxei64.v v8, (a0), v10
+; RV64V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64V-NEXT:    vle32.v v8, (a0)
 ; RV64V-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_unit_stride_load_narrow_idx:
@@ -13225,18 +13217,13 @@ define <4 x i32> @mgather_unit_stride_load_wide_idx(ptr %base) {
 ; RV32-LABEL: mgather_unit_stride_load_wide_idx:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vid.v v8
-; RV32-NEXT:    vsll.vi v8, v8, 2
-; RV32-NEXT:    vluxei32.v v8, (a0), v8
+; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64V-LABEL: mgather_unit_stride_load_wide_idx:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64V-NEXT:    vid.v v8
-; RV64V-NEXT:    vsll.vi v10, v8, 2
-; RV64V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64V-NEXT:    vluxei64.v v8, (a0), v10
+; RV64V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64V-NEXT:    vle32.v v8, (a0)
 ; RV64V-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_unit_stride_load_wide_idx:
@@ -13601,3 +13588,266 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
   ret <8 x i16> %v
 }
 
+define <8 x i16> @mgather_shuffle_reverse(ptr %base) {
+; RV32-LABEL: mgather_shuffle_reverse:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vle16.v v9, (a0)
+; RV32-NEXT:    vid.v v8
+; RV32-NEXT:    vrsub.vi v10, v8, 7
+; RV32-NEXT:    vrgather.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64V-LABEL: mgather_shuffle_reverse:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    addi a0, a0, 14
+; RV64V-NEXT:    li a1, -2
+; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64V-NEXT:    vlse16.v v8, (a0), a1
+; RV64V-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mgather_shuffle_reverse:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    addi a0, a0, 14
+; RV64ZVE32F-NEXT:    li a1, -2
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), a1
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
+; RV32-LABEL: mgather_shuffle_rotate:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vle16.v v9, (a0)
+; RV32-NEXT:    vslidedown.vi v8, v9, 4
+; RV32-NEXT:    vslideup.vi v8, v9, 4
+; RV32-NEXT:    ret
+;
+; RV64V-LABEL: mgather_shuffle_rotate:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64V-NEXT:    vle16.v v9, (a0)
+; RV64V-NEXT:    vslidedown.vi v8, v9, 4
+; RV64V-NEXT:    vslideup.vi v8, v9, 4
+; RV64V-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mgather_shuffle_rotate:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB110_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_10
+; RV64ZVE32F-NEXT:  .LBB110_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_11
+; RV64ZVE32F-NEXT:  .LBB110_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_12
+; RV64ZVE32F-NEXT:  .LBB110_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_13
+; RV64ZVE32F-NEXT:  .LBB110_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_14
+; RV64ZVE32F-NEXT:  .LBB110_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_15
+; RV64ZVE32F-NEXT:  .LBB110_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB110_16
+; RV64ZVE32F-NEXT:  .LBB110_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB110_9: # %cond.load
+; RV64ZVE32F-NEXT:    addi a2, a0, 8
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_2
+; RV64ZVE32F-NEXT:  .LBB110_10: # %cond.load1
+; RV64ZVE32F-NEXT:    addi a2, a0, 10
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_3
+; RV64ZVE32F-NEXT:  .LBB110_11: # %cond.load4
+; RV64ZVE32F-NEXT:    addi a2, a0, 12
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_4
+; RV64ZVE32F-NEXT:  .LBB110_12: # %cond.load7
+; RV64ZVE32F-NEXT:    addi a2, a0, 14
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_5
+; RV64ZVE32F-NEXT:  .LBB110_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_6
+; RV64ZVE32F-NEXT:  .LBB110_14: # %cond.load13
+; RV64ZVE32F-NEXT:    addi a2, a0, 2
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_7
+; RV64ZVE32F-NEXT:  .LBB110_15: # %cond.load16
+; RV64ZVE32F-NEXT:    addi a2, a0, 4
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB110_8
+; RV64ZVE32F-NEXT:  .LBB110_16: # %cond.load19
+; RV64ZVE32F-NEXT:    addi a0, a0, 6
+; RV64ZVE32F-NEXT:    lh a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
+; RV32-LABEL: mgather_shuffle_vrgather:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vle16.v v9, (a0)
+; RV32-NEXT:    lui a0, %hi(.LCPI111_0)
+; RV32-NEXT:    addi a0, a0, %lo(.LCPI111_0)
+; RV32-NEXT:    vle16.v v10, (a0)
+; RV32-NEXT:    vrgather.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64V-LABEL: mgather_shuffle_vrgather:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64V-NEXT:    vle16.v v9, (a0)
+; RV64V-NEXT:    lui a0, %hi(.LCPI111_0)
+; RV64V-NEXT:    addi a0, a0, %lo(.LCPI111_0)
+; RV64V-NEXT:    vle16.v v10, (a0)
+; RV64V-NEXT:    vrgather.vv v8, v9, v10
+; RV64V-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mgather_shuffle_vrgather:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB111_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_10
+; RV64ZVE32F-NEXT:  .LBB111_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_11
+; RV64ZVE32F-NEXT:  .LBB111_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_12
+; RV64ZVE32F-NEXT:  .LBB111_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_13
+; RV64ZVE32F-NEXT:  .LBB111_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_14
+; RV64ZVE32F-NEXT:  .LBB111_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_15
+; RV64ZVE32F-NEXT:  .LBB111_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB111_16
+; RV64ZVE32F-NEXT:  .LBB111_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB111_9: # %cond.load
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_2
+; RV64ZVE32F-NEXT:  .LBB111_10: # %cond.load1
+; RV64ZVE32F-NEXT:    addi a2, a0, 4
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_3
+; RV64ZVE32F-NEXT:  .LBB111_11: # %cond.load4
+; RV64ZVE32F-NEXT:    addi a2, a0, 6
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_4
+; RV64ZVE32F-NEXT:  .LBB111_12: # %cond.load7
+; RV64ZVE32F-NEXT:    addi a2, a0, 2
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_5
+; RV64ZVE32F-NEXT:  .LBB111_13: # %cond.load10
+; RV64ZVE32F-NEXT:    addi a2, a0, 8
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_6
+; RV64ZVE32F-NEXT:  .LBB111_14: # %cond.load13
+; RV64ZVE32F-NEXT:    addi a2, a0, 10
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_7
+; RV64ZVE32F-NEXT:  .LBB111_15: # %cond.load16
+; RV64ZVE32F-NEXT:    addi a2, a0, 12
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB111_8
+; RV64ZVE32F-NEXT:  .LBB111_16: # %cond.load19
+; RV64ZVE32F-NEXT:    addi a0, a0, 14
+; RV64ZVE32F-NEXT:    lh a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 0, i64 2, i64 3, i64 1, i64 4, i64 5, i64 6, i64 7>
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  ret <8 x i16> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 9e19bb7845831ee..8f7f81ec65769a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -11292,3 +11292,201 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
   call void @llvm.masked.scatter.v32i8.v32p0(<32 x i8> %val, <32 x ptr> %ptrs, i32 1, <32 x i1> %m)
   ret void
 }
+
+define void @mscatter_unit_stride(<8 x i16> %val, ptr %base) {
+; RV32-LABEL: mscatter_unit_stride:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vse16.v v8, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: mscatter_unit_stride:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 2
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vsse16.v v8, (a0), a1
+; RV64-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mscatter_unit_stride:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    li a1, 2
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vsse16.v v8, (a0), a1
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  ret void
+}
+
+define void @mscatter_unit_stride_with_offset(<8 x i16> %val, ptr %base) {
+; RV32-LABEL: mscatter_unit_stride_with_offset:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vid.v v10
+; RV32-NEXT:    vadd.vv v10, v10, v10
+; RV32-NEXT:    vadd.vi v10, v10, 10
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT:    vsoxei32.v v8, (a0), v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: mscatter_unit_stride_with_offset:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, a0, 10
+; RV64-NEXT:    li a1, 2
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vsse16.v v8, (a0), a1
+; RV64-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mscatter_unit_stride_with_offset:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    addi a0, a0, 10
+; RV64ZVE32F-NEXT:    li a1, 2
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vsse16.v v8, (a0), a1
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12>
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  ret void
+}
+
+define void @mscatter_shuffle_reverse(<8 x i16> %val, ptr %base) {
+; RV32-LABEL: mscatter_shuffle_reverse:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vid.v v9
+; RV32-NEXT:    vrsub.vi v9, v9, 7
+; RV32-NEXT:    vrgather.vv v10, v8, v9
+; RV32-NEXT:    vse16.v v10, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: mscatter_shuffle_reverse:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, a0, 14
+; RV64-NEXT:    li a1, -2
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vsse16.v v8, (a0), a1
+; RV64-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mscatter_shuffle_reverse:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    addi a0, a0, 14
+; RV64ZVE32F-NEXT:    li a1, -2
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vsse16.v v8, (a0), a1
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  ret void
+}
+
+define void @mscatter_shuffle_rotate(<8 x i16> %val, ptr %base) {
+; RV32-LABEL: mscatter_shuffle_rotate:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v8, 4
+; RV32-NEXT:    vslideup.vi v9, v8, 4
+; RV32-NEXT:    vse16.v v9, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: mscatter_shuffle_rotate:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v8, 4
+; RV64-NEXT:    vslideup.vi v9, v8, 4
+; RV64-NEXT:    vse16.v v9, (a0)
+; RV64-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mscatter_shuffle_rotate:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB96_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_10
+; RV64ZVE32F-NEXT:  .LBB96_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_11
+; RV64ZVE32F-NEXT:  .LBB96_3: # %else4
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_12
+; RV64ZVE32F-NEXT:  .LBB96_4: # %else6
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_13
+; RV64ZVE32F-NEXT:  .LBB96_5: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_14
+; RV64ZVE32F-NEXT:  .LBB96_6: # %else10
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_15
+; RV64ZVE32F-NEXT:  .LBB96_7: # %else12
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB96_16
+; RV64ZVE32F-NEXT:  .LBB96_8: # %else14
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB96_9: # %cond.store
+; RV64ZVE32F-NEXT:    addi a2, a0, 8
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_2
+; RV64ZVE32F-NEXT:  .LBB96_10: # %cond.store1
+; RV64ZVE32F-NEXT:    addi a2, a0, 10
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_3
+; RV64ZVE32F-NEXT:  .LBB96_11: # %cond.store3
+; RV64ZVE32F-NEXT:    addi a2, a0, 12
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_4
+; RV64ZVE32F-NEXT:  .LBB96_12: # %cond.store5
+; RV64ZVE32F-NEXT:    addi a2, a0, 14
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 3
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_5
+; RV64ZVE32F-NEXT:  .LBB96_13: # %cond.store7
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
+; RV64ZVE32F-NEXT:    vse16.v v9, (a0)
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_6
+; RV64ZVE32F-NEXT:  .LBB96_14: # %cond.store9
+; RV64ZVE32F-NEXT:    addi a2, a0, 2
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 5
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_7
+; RV64ZVE32F-NEXT:  .LBB96_15: # %cond.store11
+; RV64ZVE32F-NEXT:    addi a2, a0, 4
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 6
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB96_8
+; RV64ZVE32F-NEXT:  .LBB96_16: # %cond.store13
+; RV64ZVE32F-NEXT:    addi a0, a0, 6
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 7
+; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  ret void
+}


        

