[llvm] 2ea7014 - [DAGCombiner] Use isConstantSplatVectorAllZeros/Ones instead of isBuildVectorAllZeros/Ones in visitMSTORE and visitMLOAD.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 12 12:15:57 PST 2021


Author: Craig Topper
Date: 2021-03-12T12:14:56-08:00
New Revision: 2ea70140893b85ace356449c5de8db20c1cff95e

URL: https://github.com/llvm/llvm-project/commit/2ea70140893b85ace356449c5de8db20c1cff95e
DIFF: https://github.com/llvm/llvm-project/commit/2ea70140893b85ace356449c5de8db20c1cff95e.diff

LOG: [DAGCombiner] Use isConstantSplatVectorAllZeros/Ones instead of isBuildVectorAllZeros/Ones in visitMSTORE and visitMLOAD.

This allows us to optimize when the mask is a splat_vector in addition
to a build_vector. Constant masks for scalable vectors are represented
as splat_vector rather than build_vector, so these combines previously
never fired for them.
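
For illustration, this is the mask pattern from the updated RVV tests
below: an all-true scalable mask is built with insertelement +
shufflevector, which SelectionDAG represents as ISD::SPLAT_VECTOR
rather than ISD::BUILD_VECTOR, so ISD::isBuildVectorAllOnes never
matched it:

    ; All-ones <vscale x 2 x i1> mask; becomes ISD::SPLAT_VECTOR in the DAG.
    %insert = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
    %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
    %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)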

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
    llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index caee795fdef9..273b7ced3977 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9637,12 +9637,12 @@ SDValue DAGCombiner::visitMSTORE(SDNode *N) {
   SDLoc DL(N);
 
   // Zap masked stores with a zero mask.
-  if (ISD::isBuildVectorAllZeros(Mask.getNode()))
+  if (ISD::isConstantSplatVectorAllZeros(Mask.getNode()))
     return Chain;
 
  // If this is a masked store with an all-ones mask, we can use an unmasked store.
   // FIXME: Can we do this for indexed, compressing, or truncating stores?
-  if (ISD::isBuildVectorAllOnes(Mask.getNode()) &&
+  if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) &&
       MST->isUnindexed() && !MST->isCompressingStore() &&
       !MST->isTruncatingStore())
     return DAG.getStore(MST->getChain(), SDLoc(N), MST->getValue(),
@@ -9694,12 +9694,12 @@ SDValue DAGCombiner::visitMLOAD(SDNode *N) {
   SDLoc DL(N);
 
   // Zap masked loads with a zero mask.
-  if (ISD::isBuildVectorAllZeros(Mask.getNode()))
+  if (ISD::isConstantSplatVectorAllZeros(Mask.getNode()))
     return CombineTo(N, MLD->getPassThru(), MLD->getChain());
 
  // If this is a masked load with an all-ones mask, we can use an unmasked load.
   // FIXME: Can we do this for indexed, expanding, or extending loads?
-  if (ISD::isBuildVectorAllOnes(Mask.getNode()) &&
+  if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) &&
       MLD->isUnindexed() && !MLD->isExpandingLoad() &&
       MLD->getExtensionType() == ISD::NON_EXTLOAD) {
     SDValue NewLd = DAG.getLoad(N->getValueType(0), SDLoc(N), MLD->getChain(),

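For context, a rough sketch (illustrative names, not the in-tree code)
of what the splat-aware check adds over the old build_vector-only one;
the real ISD::isConstantSplatVectorAllZeros/Ones helpers are declared
alongside the other ISD predicates and are more thorough:

    #include "llvm/CodeGen/ISDOpcodes.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // Illustrative sketch: treat a constant zero splat as all-zeros whether
    // it is a SPLAT_VECTOR (scalable vectors) or a BUILD_VECTOR whose
    // elements are all zero (fixed-length vectors).
    static bool isSplatAllZerosSketch(const SDNode *N) {
      if (N->getOpcode() == ISD::SPLAT_VECTOR)
        if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0)))
          return C->isNullValue();
      // Fall back to the fixed-length path the combiner used before.
      return ISD::isBuildVectorAllZeros(N);
    }
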
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
index 08a3ffd6168d..c7133f6d7900 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
@@ -247,9 +247,6 @@ declare <vscale x 64 x i8> @llvm.masked.load.nxv64i8(<vscale x 64 x i8>*, i32, <
 define <vscale x 2 x i8> @masked_load_zero_mask(<vscale x 2 x i8>* %a) nounwind {
 ; CHECK-LABEL: masked_load_zero_mask:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i8> undef)
   ret <vscale x 2 x i8> %load
@@ -259,9 +256,7 @@ define <vscale x 2 x i8> @masked_load_allones_mask(<vscale x 2 x i8>* %a, <vscal
 ; CHECK-LABEL: masked_load_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %insert = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
index e886c9a68809..57aef349222f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
@@ -247,9 +247,6 @@ declare void @llvm.masked.store.v64i8.p0v64i8(<vscale x 64 x i8>, <vscale x 64 x
 define void @masked_store_zero_mask(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a) nounwind {
 ; CHECK-LABEL: masked_store_zero_mask:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   call void @llvm.masked.store.v2i8.p0v2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> zeroinitializer)
   ret void
@@ -259,8 +256,7 @@ define void @masked_store_allones_mask(<vscale x 2 x i8> %val, <vscale x 2 x i8>
 ; CHECK-LABEL: masked_store_allones_mask:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %insert = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
