[llvm] 19cc461 - [RISCV] Use VP strided load in concat_vectors combine (#98131)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 9 03:36:03 PDT 2024


Author: Luke Lau
Date: 2024-07-09T18:36:00+08:00
New Revision: 19cc46144d10964a55cc7e7f3abeeba5f8c161ba

URL: https://github.com/llvm/llvm-project/commit/19cc46144d10964a55cc7e7f3abeeba5f8c161ba
DIFF: https://github.com/llvm/llvm-project/commit/19cc46144d10964a55cc7e7f3abeeba5f8c161ba.diff

LOG: [RISCV] Use VP strided load in concat_vectors combine (#98131)

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e2b6a98bc72ff..19f958ccfd2e1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16207,18 +16207,10 @@ static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG,
   if (MustNegateStride)
     Stride = DAG.getNegative(Stride, DL, Stride.getValueType());
 
-  SDVTList VTs = DAG.getVTList({WideVecVT, MVT::Other});
-  SDValue IntID =
-    DAG.getTargetConstant(Intrinsic::riscv_masked_strided_load, DL,
-                          Subtarget.getXLenVT());
-
   SDValue AllOneMask =
     DAG.getSplat(WideVecVT.changeVectorElementType(MVT::i1), DL,
                  DAG.getConstant(1, DL, MVT::i1));
 
-  SDValue Ops[] = {BaseLd->getChain(),   IntID,  DAG.getUNDEF(WideVecVT),
-                   BaseLd->getBasePtr(), Stride, AllOneMask};
-
   uint64_t MemSize;
   if (auto *ConstStride = dyn_cast<ConstantSDNode>(Stride);
       ConstStride && ConstStride->getSExtValue() >= 0)
@@ -16234,8 +16226,11 @@ static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG,
       BaseLd->getPointerInfo(), BaseLd->getMemOperand()->getFlags(), MemSize,
       Align);
 
-  SDValue StridedLoad = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
-                                                Ops, WideVecVT, MMO);
+  SDValue StridedLoad = DAG.getStridedLoadVP(
+      WideVecVT, DL, BaseLd->getChain(), BaseLd->getBasePtr(), Stride,
+      AllOneMask,
+      DAG.getConstant(N->getNumOperands(), DL, Subtarget.getXLenVT()), MMO);
+
   for (SDValue Ld : N->ops())
     DAG.makeEquivalentMemoryOrdering(cast<LoadSDNode>(Ld), StridedLoad);
 

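For context, the node built by DAG.getStridedLoadVP above models the same operation as the @llvm.experimental.vp.strided.load intrinsic. Below is a minimal IR-level sketch (not part of this commit; the function name is illustrative, with the stride and EVL values taken from the widen_2xv4i16 test that follows) of how the combine treats two contiguous <4 x i16> loads:

; Two adjacent <4 x i16> loads are modeled as one VP strided load of two
; 64-bit elements: base pointer %x, 8-byte stride, all-ones mask, EVL = 2.
define <2 x i64> @sketch_widen_2xv4i16(ptr %x) {
  %v = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i64(ptr %x, i64 8, <2 x i1> <i1 true, i1 true>, i32 2)
  ret <2 x i64> %v
}

declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i64(ptr, i64, <2 x i1>, i32)

Because the stride equals the element width in this contiguous case, the load can be lowered to the unit-stride vle64.v seen in the updated CHECK lines.
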
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index 0e1105848440a..cdf0d35843620 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -9,9 +9,9 @@
 define void @widen_2xv4i16(ptr %x, ptr %z) {
 ; CHECK-LABEL: widen_2xv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
   %b.gep = getelementptr i8, ptr %x, i64 8
@@ -52,9 +52,9 @@ define void @widen_3xv4i16(ptr %x, ptr %z) {
 define void @widen_4xv4i16(ptr %x, ptr %z) {
 ; CHECK-LABEL: widen_4xv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
   %b.gep = getelementptr i8, ptr %x, i64 8
@@ -90,9 +90,9 @@ define void @widen_4xv4i16_unaligned(ptr %x, ptr %z) {
 ;
 ; RV64-MISALIGN-LABEL: widen_4xv4i16_unaligned:
 ; RV64-MISALIGN:       # %bb.0:
-; RV64-MISALIGN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; RV64-MISALIGN-NEXT:    vle16.v v8, (a0)
-; RV64-MISALIGN-NEXT:    vse16.v v8, (a1)
+; RV64-MISALIGN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-MISALIGN-NEXT:    vle64.v v8, (a0)
+; RV64-MISALIGN-NEXT:    vse64.v v8, (a1)
 ; RV64-MISALIGN-NEXT:    ret
   %a = load <4 x i16>, ptr %x, align 1
   %b.gep = getelementptr i8, ptr %x, i64 8


        

