[PATCH] D114582: [SDAG] Refine MMO size when converting masked load/store to normal load/store

Dave Green via Phabricator via llvm-commits <llvm-commits at lists.llvm.org>
Wed Dec 8 02:13:44 PST 2021


This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG5d7efd4758b3: [SDAG] Refine MMO size when converting masked load/store to normal load/store (authored by dmgreen).

Changed prior to commit:
  https://reviews.llvm.org/D114582?vs=392381&id=392687#toc

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D114582/new/

https://reviews.llvm.org/D114582
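
In brief, when a masked store (or masked load) has a constant all-ones mask,
DAGCombiner already turns it into a plain store (or load). Previously the new
node reused the masked node's MachineMemOperand, which is marked unknown-size
once the type legalizer has split the masked operation. The diff below instead
rebuilds the operand from the pointer info, original alignment, and AA metadata
(plus range metadata for loads), so the converted access carries a precisely
sized MMO that alias analysis and scheduling can reason about.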

Files:
  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
  llvm/test/CodeGen/X86/masked_loadstore_split.ll


Index: llvm/test/CodeGen/X86/masked_loadstore_split.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/masked_loadstore_split.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 -stop-after=finalize-isel | FileCheck %s
+
+define void @split_masked_store(<8 x double>* %0) {
+  ; CHECK-LABEL: name: split_masked_store
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $rdi
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+  ; CHECK-NEXT:   [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
+  ; CHECK-NEXT:   [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool)
+  ; CHECK-NEXT:   VMASKMOVPDYmr [[COPY]], 1, $noreg, 32, $noreg, killed [[VMOVAPSYrm]], [[AVX_SET0_]] :: (store unknown-size into %ir.0 + 32, align 8)
+  ; CHECK-NEXT:   VMOVUPDYmr [[COPY]], 1, $noreg, 0, $noreg, [[AVX_SET0_]] :: (store (s256) into %ir.0, align 8)
+  ; CHECK-NEXT:   RET 0
+entry:
+  call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> zeroinitializer, <8 x double>* %0, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>)
+  ret void
+}
+
+define <8 x double> @split_masked_load(<8 x double>* %0) {
+  ; CHECK-LABEL: name: split_masked_load
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $rdi
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+  ; CHECK-NEXT:   [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool)
+  ; CHECK-NEXT:   [[VMASKMOVPDYrm:%[0-9]+]]:vr256 = VMASKMOVPDYrm killed [[VMOVAPSYrm]], [[COPY]], 1, $noreg, 32, $noreg :: (load unknown-size from %ir.0 + 32, align 8)
+  ; CHECK-NEXT:   [[VMOVUPDYrm:%[0-9]+]]:vr256 = VMOVUPDYrm [[COPY]], 1, $noreg, 0, $noreg :: (load (s256) from %ir.0, align 8)
+  ; CHECK-NEXT:   $ymm0 = COPY [[VMOVUPDYrm]]
+  ; CHECK-NEXT:   $ymm1 = COPY [[VMASKMOVPDYrm]]
+  ; CHECK-NEXT:   RET 0, $ymm0, $ymm1
+entry:
+  %x = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %0, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>, <8 x double> poison)
+  ret <8 x double> %x
+}
+
+declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
+declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
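
Note the contrast in the generated checks above: the still-masked half of the
split store keeps an imprecise operand, "(store unknown-size into %ir.0 + 32,
align 8)", while the half whose mask is all-ones is converted to a plain
VMOVUPDYmr carrying a precisely sized "(store (s256) into %ir.0, align 8)".
The load test shows the same refinement.
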
Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10067,7 +10067,9 @@
   if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) && MST->isUnindexed() &&
       !MST->isCompressingStore() && !MST->isTruncatingStore())
     return DAG.getStore(MST->getChain(), SDLoc(N), MST->getValue(),
-                        MST->getBasePtr(), MST->getMemOperand());
+                        MST->getBasePtr(), MST->getPointerInfo(),
+                        MST->getOriginalAlign(), MachineMemOperand::MOStore,
+                        MST->getAAInfo());
 
   // Try transforming N to an indexed store.
   if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
@@ -10122,8 +10124,10 @@
   // FIXME: Can we do this for indexed, expanding, or extending loads?
   if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) && MLD->isUnindexed() &&
       !MLD->isExpandingLoad() && MLD->getExtensionType() == ISD::NON_EXTLOAD) {
-    SDValue NewLd = DAG.getLoad(N->getValueType(0), SDLoc(N), MLD->getChain(),
-                                MLD->getBasePtr(), MLD->getMemOperand());
+    SDValue NewLd = DAG.getLoad(
+        N->getValueType(0), SDLoc(N), MLD->getChain(), MLD->getBasePtr(),
+        MLD->getPointerInfo(), MLD->getOriginalAlign(),
+        MachineMemOperand::MOLoad, MLD->getAAInfo(), MLD->getRanges());
     return CombineTo(N, NewLd, NewLd.getValue(1));
   }
 
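For context, the key difference is where the MachineMemOperand comes from. The
getStore/getLoad overloads that take a MachinePointerInfo construct a fresh
MMO sized from the memory VT of the new plain load/store, whereas passing
MST->getMemOperand() forwards the masked node's MMO, which stays unknown-size
once the type legalizer has split the masked operation. Below is a minimal
sketch of the equivalent MMO construction for the store case, assuming the
MachineFunction::getMachineMemOperand API; the helper name is hypothetical
and is not part of the patch:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  #include "llvm/CodeGen/SelectionDAG.h"
  #include "llvm/CodeGen/SelectionDAGNodes.h"

  using namespace llvm;

  // Hypothetical helper, for illustration only: build a precisely sized
  // memory operand for a masked store being converted to a plain store,
  // instead of reusing MST->getMemOperand(), whose size may be unknown
  // after the type legalizer split the masked operation.
  static MachineMemOperand *refineStoreMMO(SelectionDAG &DAG,
                                           MaskedStoreSDNode *MST) {
    MachineFunction &MF = DAG.getMachineFunction();
    return MF.getMachineMemOperand(
        MST->getPointerInfo(), MachineMemOperand::MOStore,
        MST->getMemoryVT().getStoreSize().getFixedSize(), // exact size
        MST->getOriginalAlign(), MST->getAAInfo());
  }

The MachinePointerInfo-based DAG.getStore/getLoad overloads perform this
construction internally, which is why the diff simply switches overloads,
threading the range metadata through in the load case.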

