[llvm] 70254cc - [RISCV] Turn splat shuffles of vector loads into strided load with stride of x0.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 22 10:03:11 PDT 2021


Author: Craig Topper
Date: 2021-04-22T10:02:57-07:00
New Revision: 70254ccb69fa82cd953b1609796d239bdcf53923

URL: https://github.com/llvm/llvm-project/commit/70254ccb69fa82cd953b1609796d239bdcf53923
DIFF: https://github.com/llvm/llvm-project/commit/70254ccb69fa82cd953b1609796d239bdcf53923.diff

LOG: [RISCV] Turn splat shuffles of vector loads into strided load with stride of x0.

Implementations are allowed to optimize an x0 stride to perform
fewer memory accesses. This is the case on SiFive cores.

It is unclear whether this holds for other implementations; we may
need a tuning flag for this.
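
For illustration, here is the codegen change on the gather_const_v8f16
test updated below: splatting element 5 of a loaded <8 x half>
previously loaded the whole vector and broadcast one lane with
vrgather.vi; it now becomes a single zero-stride vlse16.v reading only
the splatted element at a0 + 5*2 bytes:

  ; Before: load the full vector, then broadcast lane 5.
  vsetivli a1, 8, e16,m1,ta,mu
  vle16.v  v25, (a0)
  vrgather.vi v26, v25, 5

  ; After: point at the element and do a stride-of-x0 (zero-stride)
  ; load, which an implementation may service with a single access.
  addi     a1, a0, 10
  vsetivli a2, 8, e16,m1,ta,mu
  vlse16.v v25, (a1), zero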

Reviewed By: frasercrmck, arcbbb

Differential Revision: https://reviews.llvm.org/D100815

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 066e244e42d2..b422ef31119e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1556,8 +1556,43 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
 
   if (SVN->isSplat()) {
-    int Lane = SVN->getSplatIndex();
+    const int Lane = SVN->getSplatIndex();
     if (Lane >= 0) {
+      MVT SVT = VT.getVectorElementType();
+
+      // Turn splatted vector load into a strided load with an X0 stride.
+      SDValue V = V1;
+      // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
+      // with undef.
+      // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
+      int Offset = Lane;
+      if (V.getOpcode() == ISD::CONCAT_VECTORS) {
+        int OpElements =
+            V.getOperand(0).getSimpleValueType().getVectorNumElements();
+        V = V.getOperand(Offset / OpElements);
+        Offset %= OpElements;
+      }
+
+      // We need to ensure the load isn't atomic or volatile.
+      if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
+        auto *Ld = cast<LoadSDNode>(V);
+        Offset *= SVT.getStoreSize();
+        SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
+                                                   TypeSize::Fixed(Offset), DL);
+
+        SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
+        SDValue IntID =
+            DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
+        SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
+                         DAG.getRegister(RISCV::X0, XLenVT), VL};
+        SDValue NewLoad = DAG.getMemIntrinsicNode(
+            ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
+            DAG.getMachineFunction().getMachineMemOperand(
+                Ld->getMemOperand(), Offset, SVT.getStoreSize()));
+        DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
+        return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
+      }
+
       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
       assert(Lane < (int)NumElts && "Unexpected lane!");
       SDValue Gather =

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
index c8f0dd8df210..a4e59c621011 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -7,10 +7,10 @@
 define void @gather_const_v8f16(<8 x half>* %x) {
 ; CHECK-LABEL: gather_const_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 5
-; CHECK-NEXT:    vse16.v v26, (a0)
+; CHECK-NEXT:    addi a1, a0, 10
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vlse16.v v25, (a1), zero
+; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = extractelement <8 x half> %a, i32 5
@@ -23,10 +23,10 @@ define void @gather_const_v8f16(<8 x half>* %x) {
 define void @gather_const_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: gather_const_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; CHECK-NEXT:    vle32.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 2
-; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a0, 8
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; CHECK-NEXT:    vlse32.v v25, (a1), zero
+; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %x
   %b = extractelement <4 x float> %a, i32 2
@@ -40,9 +40,8 @@ define void @gather_const_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: gather_const_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
-; CHECK-NEXT:    vle64.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 0
-; CHECK-NEXT:    vse64.v v26, (a0)
+; CHECK-NEXT:    vlse64.v v25, (a0), zero
+; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, <2 x double>* %x
   %b = extractelement <2 x double> %a, i32 0
@@ -55,34 +54,33 @@ define void @gather_const_v2f64(<2 x double>* %x) {
 define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX8-LABEL: gather_const_v64f16:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 64
-; LMULMAX8-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
-; LMULMAX8-NEXT:    vle16.v v8, (a0)
-; LMULMAX8-NEXT:    addi a1, zero, 47
-; LMULMAX8-NEXT:    vrgather.vx v16, v8, a1
-; LMULMAX8-NEXT:    vse16.v v16, (a0)
+; LMULMAX8-NEXT:    addi a1, a0, 94
+; LMULMAX8-NEXT:    addi a2, zero, 64
+; LMULMAX8-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
+; LMULMAX8-NEXT:    vlse16.v v8, (a1), zero
+; LMULMAX8-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v64f16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 80
-; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; LMULMAX1-NEXT:    vle16.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
-; LMULMAX1-NEXT:    addi a4, a0, 32
-; LMULMAX1-NEXT:    addi a5, a0, 64
-; LMULMAX1-NEXT:    addi a2, a0, 112
-; LMULMAX1-NEXT:    addi a3, a0, 96
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 7
-; LMULMAX1-NEXT:    vse16.v v26, (a3)
-; LMULMAX1-NEXT:    vse16.v v26, (a2)
-; LMULMAX1-NEXT:    vse16.v v26, (a5)
-; LMULMAX1-NEXT:    vse16.v v26, (a1)
-; LMULMAX1-NEXT:    vse16.v v26, (a4)
-; LMULMAX1-NEXT:    vse16.v v26, (a7)
-; LMULMAX1-NEXT:    vse16.v v26, (a0)
-; LMULMAX1-NEXT:    vse16.v v26, (a6)
+; LMULMAX1-NEXT:    addi a3, a0, 32
+; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a5, a0, 94
+; LMULMAX1-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
+; LMULMAX1-NEXT:    vlse16.v v25, (a5), zero
+; LMULMAX1-NEXT:    addi a1, a0, 64
+; LMULMAX1-NEXT:    addi a5, a0, 112
+; LMULMAX1-NEXT:    addi a2, a0, 96
+; LMULMAX1-NEXT:    vse16.v v25, (a2)
+; LMULMAX1-NEXT:    vse16.v v25, (a5)
+; LMULMAX1-NEXT:    vse16.v v25, (a1)
+; LMULMAX1-NEXT:    vse16.v v25, (a4)
+; LMULMAX1-NEXT:    vse16.v v25, (a3)
+; LMULMAX1-NEXT:    vse16.v v25, (a7)
+; LMULMAX1-NEXT:    vse16.v v25, (a0)
+; LMULMAX1-NEXT:    vse16.v v25, (a6)
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
   %b = extractelement <64 x half> %a, i32 47
@@ -95,33 +93,33 @@ define void @gather_const_v64f16(<64 x half>* %x) {
 define void @gather_const_v32f32(<32 x float>* %x) {
 ; LMULMAX8-LABEL: gather_const_v32f32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 32
-; LMULMAX8-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
-; LMULMAX8-NEXT:    vle32.v v8, (a0)
-; LMULMAX8-NEXT:    vrgather.vi v16, v8, 17
-; LMULMAX8-NEXT:    vse32.v v16, (a0)
+; LMULMAX8-NEXT:    addi a1, a0, 68
+; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
+; LMULMAX8-NEXT:    vlse32.v v8, (a1), zero
+; LMULMAX8-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v32f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 64
-; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; LMULMAX1-NEXT:    vle32.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
-; LMULMAX1-NEXT:    addi a4, a0, 32
-; LMULMAX1-NEXT:    addi a5, a0, 80
-; LMULMAX1-NEXT:    addi a2, a0, 112
-; LMULMAX1-NEXT:    addi a3, a0, 96
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse32.v v26, (a3)
-; LMULMAX1-NEXT:    vse32.v v26, (a2)
-; LMULMAX1-NEXT:    vse32.v v26, (a1)
-; LMULMAX1-NEXT:    vse32.v v26, (a5)
-; LMULMAX1-NEXT:    vse32.v v26, (a4)
-; LMULMAX1-NEXT:    vse32.v v26, (a7)
-; LMULMAX1-NEXT:    vse32.v v26, (a0)
-; LMULMAX1-NEXT:    vse32.v v26, (a6)
+; LMULMAX1-NEXT:    addi a3, a0, 32
+; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    addi a5, a0, 68
+; LMULMAX1-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
+; LMULMAX1-NEXT:    vlse32.v v25, (a5), zero
+; LMULMAX1-NEXT:    addi a1, a0, 64
+; LMULMAX1-NEXT:    addi a5, a0, 112
+; LMULMAX1-NEXT:    addi a2, a0, 96
+; LMULMAX1-NEXT:    vse32.v v25, (a2)
+; LMULMAX1-NEXT:    vse32.v v25, (a5)
+; LMULMAX1-NEXT:    vse32.v v25, (a1)
+; LMULMAX1-NEXT:    vse32.v v25, (a4)
+; LMULMAX1-NEXT:    vse32.v v25, (a3)
+; LMULMAX1-NEXT:    vse32.v v25, (a7)
+; LMULMAX1-NEXT:    vse32.v v25, (a0)
+; LMULMAX1-NEXT:    vse32.v v25, (a6)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %x
   %b = extractelement <32 x float> %a, i32 17
@@ -134,32 +132,31 @@ define void @gather_const_v32f32(<32 x float>* %x) {
 define void @gather_const_v16f64(<16 x double>* %x) {
 ; LMULMAX8-LABEL: gather_const_v16f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli a1, 16, e64,m8,ta,mu
-; LMULMAX8-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-NEXT:    vrgather.vi v16, v8, 10
-; LMULMAX8-NEXT:    vse64.v v16, (a0)
+; LMULMAX8-NEXT:    addi a1, a0, 80
+; LMULMAX8-NEXT:    vsetivli a2, 16, e64,m8,ta,mu
+; LMULMAX8-NEXT:    vlse64.v v8, (a1), zero
+; LMULMAX8-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v16f64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 80
-; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; LMULMAX1-NEXT:    vle64.v v25, (a1)
 ; LMULMAX1-NEXT:    addi a6, a0, 16
 ; LMULMAX1-NEXT:    addi a7, a0, 48
-; LMULMAX1-NEXT:    addi a4, a0, 32
+; LMULMAX1-NEXT:    addi a3, a0, 32
+; LMULMAX1-NEXT:    addi a4, a0, 80
+; LMULMAX1-NEXT:    vsetivli a5, 2, e64,m1,ta,mu
+; LMULMAX1-NEXT:    vlse64.v v25, (a4), zero
 ; LMULMAX1-NEXT:    addi a5, a0, 64
-; LMULMAX1-NEXT:    addi a2, a0, 112
-; LMULMAX1-NEXT:    addi a3, a0, 96
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 0
-; LMULMAX1-NEXT:    vse64.v v26, (a3)
-; LMULMAX1-NEXT:    vse64.v v26, (a2)
-; LMULMAX1-NEXT:    vse64.v v26, (a5)
-; LMULMAX1-NEXT:    vse64.v v26, (a1)
-; LMULMAX1-NEXT:    vse64.v v26, (a4)
-; LMULMAX1-NEXT:    vse64.v v26, (a7)
-; LMULMAX1-NEXT:    vse64.v v26, (a0)
-; LMULMAX1-NEXT:    vse64.v v26, (a6)
+; LMULMAX1-NEXT:    addi a1, a0, 112
+; LMULMAX1-NEXT:    addi a2, a0, 96
+; LMULMAX1-NEXT:    vse64.v v25, (a2)
+; LMULMAX1-NEXT:    vse64.v v25, (a1)
+; LMULMAX1-NEXT:    vse64.v v25, (a5)
+; LMULMAX1-NEXT:    vse64.v v25, (a4)
+; LMULMAX1-NEXT:    vse64.v v25, (a3)
+; LMULMAX1-NEXT:    vse64.v v25, (a7)
+; LMULMAX1-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-NEXT:    vse64.v v25, (a6)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x double>, <16 x double>* %x
   %b = extractelement <16 x double> %a, i32 10

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
index 39ea974b6718..375f91c3b928 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -7,10 +7,10 @@
 define void @gather_const_v16i8(<16 x i8>* %x) {
 ; CHECK-LABEL: gather_const_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
-; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 12
-; CHECK-NEXT:    vse8.v v26, (a0)
+; CHECK-NEXT:    addi a1, a0, 12
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
+; CHECK-NEXT:    vlse8.v v25, (a1), zero
+; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = extractelement <16 x i8> %a, i32 12
@@ -23,10 +23,10 @@ define void @gather_const_v16i8(<16 x i8>* %x) {
 define void @gather_const_v8i16(<8 x i16>* %x) {
 ; CHECK-LABEL: gather_const_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
-; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 5
-; CHECK-NEXT:    vse16.v v26, (a0)
+; CHECK-NEXT:    addi a1, a0, 10
+; CHECK-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vlse16.v v25, (a1), zero
+; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %x
   %b = extractelement <8 x i16> %a, i32 5
@@ -39,10 +39,10 @@ define void @gather_const_v8i16(<8 x i16>* %x) {
 define void @gather_const_v4i32(<4 x i32>* %x) {
 ; CHECK-LABEL: gather_const_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
-; CHECK-NEXT:    vle32.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 3
-; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a0, 12
+; CHECK-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
+; CHECK-NEXT:    vlse32.v v25, (a1), zero
+; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %x
   %b = extractelement <4 x i32> %a, i32 3
@@ -55,10 +55,10 @@ define void @gather_const_v4i32(<4 x i32>* %x) {
 define void @gather_const_v2i64(<2 x i64>* %x) {
 ; CHECK-LABEL: gather_const_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
-; CHECK-NEXT:    vle64.v v25, (a0)
-; CHECK-NEXT:    vrgather.vi v26, v25, 1
-; CHECK-NEXT:    vse64.v v26, (a0)
+; CHECK-NEXT:    addi a1, a0, 8
+; CHECK-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
+; CHECK-NEXT:    vlse64.v v25, (a1), zero
+; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
   %b = extractelement <2 x i64> %a, i32 1
@@ -71,26 +71,24 @@ define void @gather_const_v2i64(<2 x i64>* %x) {
 define void @gather_const_v64i8(<64 x i8>* %x) {
 ; LMULMAX4-LABEL: gather_const_v64i8:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, zero, 64
-; LMULMAX4-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; LMULMAX4-NEXT:    vle8.v v28, (a0)
-; LMULMAX4-NEXT:    addi a1, zero, 32
-; LMULMAX4-NEXT:    vrgather.vx v8, v28, a1
-; LMULMAX4-NEXT:    vse8.v v8, (a0)
+; LMULMAX4-NEXT:    addi a1, a0, 32
+; LMULMAX4-NEXT:    addi a2, zero, 64
+; LMULMAX4-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; LMULMAX4-NEXT:    vlse8.v v28, (a1), zero
+; LMULMAX4-NEXT:    vse8.v v28, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v64i8:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    addi a1, a0, 32
 ; LMULMAX1-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
-; LMULMAX1-NEXT:    vle8.v v25, (a1)
+; LMULMAX1-NEXT:    vlse8.v v25, (a1), zero
 ; LMULMAX1-NEXT:    addi a2, a0, 16
 ; LMULMAX1-NEXT:    addi a3, a0, 48
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 0
-; LMULMAX1-NEXT:    vse8.v v26, (a1)
-; LMULMAX1-NEXT:    vse8.v v26, (a3)
-; LMULMAX1-NEXT:    vse8.v v26, (a0)
-; LMULMAX1-NEXT:    vse8.v v26, (a2)
+; LMULMAX1-NEXT:    vse8.v v25, (a1)
+; LMULMAX1-NEXT:    vse8.v v25, (a3)
+; LMULMAX1-NEXT:    vse8.v v25, (a0)
+; LMULMAX1-NEXT:    vse8.v v25, (a2)
 ; LMULMAX1-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %x
   %b = extractelement <64 x i8> %a, i32 32
@@ -103,25 +101,25 @@ define void @gather_const_v64i8(<64 x i8>* %x) {
 define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i16:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, zero, 32
-; LMULMAX4-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; LMULMAX4-NEXT:    vle16.v v28, (a0)
-; LMULMAX4-NEXT:    vrgather.vi v8, v28, 25
-; LMULMAX4-NEXT:    vse16.v v8, (a0)
+; LMULMAX4-NEXT:    addi a1, a0, 50
+; LMULMAX4-NEXT:    addi a2, zero, 32
+; LMULMAX4-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; LMULMAX4-NEXT:    vlse16.v v28, (a1), zero
+; LMULMAX4-NEXT:    vse16.v v28, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v16i16:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 48
+; LMULMAX1-NEXT:    addi a1, a0, 50
 ; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
-; LMULMAX1-NEXT:    vle16.v v25, (a1)
-; LMULMAX1-NEXT:    addi a2, a0, 16
+; LMULMAX1-NEXT:    vlse16.v v25, (a1), zero
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse16.v v26, (a3)
-; LMULMAX1-NEXT:    vse16.v v26, (a1)
-; LMULMAX1-NEXT:    vse16.v v26, (a0)
-; LMULMAX1-NEXT:    vse16.v v26, (a2)
+; LMULMAX1-NEXT:    vse16.v v25, (a3)
+; LMULMAX1-NEXT:    vse16.v v25, (a2)
+; LMULMAX1-NEXT:    vse16.v v25, (a0)
+; LMULMAX1-NEXT:    vse16.v v25, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %x
   %b = extractelement <32 x i16> %a, i32 25
@@ -134,24 +132,24 @@ define void @gather_const_v16i16(<32 x i16>* %x) {
 define void @gather_const_v16i32(<16 x i32>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i32:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli a1, 16, e32,m4,ta,mu
-; LMULMAX4-NEXT:    vle32.v v28, (a0)
-; LMULMAX4-NEXT:    vrgather.vi v8, v28, 9
-; LMULMAX4-NEXT:    vse32.v v8, (a0)
+; LMULMAX4-NEXT:    addi a1, a0, 36
+; LMULMAX4-NEXT:    vsetivli a2, 16, e32,m4,ta,mu
+; LMULMAX4-NEXT:    vlse32.v v28, (a1), zero
+; LMULMAX4-NEXT:    vse32.v v28, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v16i32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 32
+; LMULMAX1-NEXT:    addi a1, a0, 36
 ; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
-; LMULMAX1-NEXT:    vle32.v v25, (a1)
-; LMULMAX1-NEXT:    addi a2, a0, 16
-; LMULMAX1-NEXT:    addi a3, a0, 48
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse32.v v26, (a1)
-; LMULMAX1-NEXT:    vse32.v v26, (a3)
-; LMULMAX1-NEXT:    vse32.v v26, (a0)
-; LMULMAX1-NEXT:    vse32.v v26, (a2)
+; LMULMAX1-NEXT:    vlse32.v v25, (a1), zero
+; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    addi a2, a0, 48
+; LMULMAX1-NEXT:    addi a3, a0, 32
+; LMULMAX1-NEXT:    vse32.v v25, (a3)
+; LMULMAX1-NEXT:    vse32.v v25, (a2)
+; LMULMAX1-NEXT:    vse32.v v25, (a0)
+; LMULMAX1-NEXT:    vse32.v v25, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %x
   %b = extractelement <16 x i32> %a, i32 9
@@ -164,24 +162,24 @@ define void @gather_const_v16i32(<16 x i32>* %x) {
 define void @gather_const_v8i64(<8 x i64>* %x) {
 ; LMULMAX4-LABEL: gather_const_v8i64:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
-; LMULMAX4-NEXT:    vle64.v v28, (a0)
-; LMULMAX4-NEXT:    vrgather.vi v8, v28, 3
-; LMULMAX4-NEXT:    vse64.v v8, (a0)
+; LMULMAX4-NEXT:    addi a1, a0, 24
+; LMULMAX4-NEXT:    vsetivli a2, 8, e64,m4,ta,mu
+; LMULMAX4-NEXT:    vlse64.v v28, (a1), zero
+; LMULMAX4-NEXT:    vse64.v v28, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: gather_const_v8i64:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    addi a1, a0, 16
+; LMULMAX1-NEXT:    addi a1, a0, 24
 ; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
-; LMULMAX1-NEXT:    vle64.v v25, (a1)
+; LMULMAX1-NEXT:    vlse64.v v25, (a1), zero
+; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    addi a2, a0, 48
 ; LMULMAX1-NEXT:    addi a3, a0, 32
-; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
-; LMULMAX1-NEXT:    vse64.v v26, (a3)
-; LMULMAX1-NEXT:    vse64.v v26, (a2)
-; LMULMAX1-NEXT:    vse64.v v26, (a0)
-; LMULMAX1-NEXT:    vse64.v v26, (a1)
+; LMULMAX1-NEXT:    vse64.v v25, (a3)
+; LMULMAX1-NEXT:    vse64.v v25, (a2)
+; LMULMAX1-NEXT:    vse64.v v25, (a0)
+; LMULMAX1-NEXT:    vse64.v v25, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
   %b = extractelement <8 x i64> %a, i32 3
@@ -190,3 +188,35 @@ define void @gather_const_v8i64(<8 x i64>* %x) {
   store <8 x i64> %d, <8 x i64>* %x
   ret void
 }
+
+define void @splat_concat_low(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
+; CHECK-LABEL: splat_concat_low:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, 2
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vlse16.v v25, (a0), zero
+; CHECK-NEXT:    vse16.v v25, (a2)
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, <4 x i16>* %x
+  %b = load <4 x i16>, <4 x i16>* %y
+  %c = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  store <8 x i16> %d, <8 x i16>* %z
+  ret void
+}
+
+define void @splat_concat_high(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
+; CHECK-LABEL: splat_concat_high:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a1, 2
+; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
+; CHECK-NEXT:    vlse16.v v25, (a0), zero
+; CHECK-NEXT:    vse16.v v25, (a2)
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, <4 x i16>* %x
+  %b = load <4 x i16>, <4 x i16>* %y
+  %c = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+  store <8 x i16> %d, <8 x i16>* %z
+  ret void
+}

