[llvm] r301187 - [SDAG] Teach Chain Analysis about BaseIndexOffset addressing.

Nirav Dave via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 24 08:37:21 PDT 2017


Author: niravd
Date: Mon Apr 24 10:37:20 2017
New Revision: 301187

URL: http://llvm.org/viewvc/llvm-project?rev=301187&view=rev
Log:
[SDAG] Teach Chain Analysis about BaseIndexOffset addressing.

While FindBetterNeighborChains uses BaseIndexOffset to recognize that two
memory operations address nearly the same location and should have their
chains improved together, isAlias does not; it still relies on
FindBaseOffset, which has no notion of indexed addressing. Adding an
equivalent BaseIndexOffset check to isAlias allows indexed stores to be merged.

FindBaseOffset will be excised in a subsequent patch.

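For illustration, here is a minimal standalone sketch of the overlap test this
change adds to isAlias. It is a hypothetical helper, not the committed
DAGCombiner code, and it assumes the two accesses have already been shown to
share the same base and index (e.g. via BaseIndexOffset::equalBaseIndex):

  #include <cstdint>

  // Two byte ranges [Offset, Offset + NumBytes) with an equal (base, index)
  // pair can only alias if the ranges intersect.
  static bool mayOverlap(int64_t Offset0, uint64_t NumBytes0,
                         int64_t Offset1, uint64_t NumBytes1) {
    // Disjoint exactly when one access ends at or before the other begins.
    bool Disjoint = Offset0 + (int64_t)NumBytes0 <= Offset1 ||
                    Offset1 + (int64_t)NumBytes1 <= Offset0;
    return !Disjoint;
  }

For example, the two 8-byte stores to 8(%rdi) and 16(%rdi) in the i256-add.ll
output below satisfy 8 + 8 <= 16, so they are reported as non-aliasing and
their order may change; likewise the four 1-byte stores at offsets 2 through 5
in merge_store.ll are recognized as adjacent and merged into a single 4-byte
store.
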
Reviewers: jyknight, aditya_nandakumar, bogner

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D31987

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/X86/i256-add.ll
    llvm/trunk/test/CodeGen/X86/merge_store.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=301187&r1=301186&r2=301187&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Mon Apr 24 10:37:20 2017
@@ -16088,6 +16088,19 @@ bool DAGCombiner::isAlias(LSBaseSDNode *
   if (Op1->isInvariant() && Op0->writeMem())
     return false;
 
+  unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3;
+  unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3;
+
+  // Check for BaseIndexOffset matching.
+  BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0->getBasePtr(), DAG);
+  BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1->getBasePtr(), DAG);
+  if (BasePtr0.equalBaseIndex(BasePtr1))
+    return !((BasePtr0.Offset + NumBytes0 <= BasePtr1.Offset) ||
+             (BasePtr1.Offset + NumBytes1 <= BasePtr0.Offset));
+
+  // FIXME: findBaseOffset and ConstantValue/GlobalValue/FrameIndex analysis
+  // modified to use BaseIndexOffset.
+
   // Gather base node and offset information.
   SDValue Base0, Base1;
   int64_t Offset0, Offset1;
@@ -16099,8 +16112,6 @@ bool DAGCombiner::isAlias(LSBaseSDNode *
                                       Base1, Offset1, GV1, CV1);
 
   // If they have the same base address, then check to see if they overlap.
-  unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3;
-  unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3;
   if (Base0 == Base1 || (GV0 && (GV0 == GV1)) || (CV0 && (CV0 == CV1)))
     return !((Offset0 + NumBytes0) <= Offset1 ||
              (Offset1 + NumBytes1) <= Offset0);

Modified: llvm/trunk/test/CodeGen/X86/i256-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/i256-add.ll?rev=301187&r1=301186&r2=301187&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/i256-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/i256-add.ll Mon Apr 24 10:37:20 2017
@@ -12,34 +12,35 @@ define void @add(i256* %p, i256* %q) nou
 ; X32-NEXT:    subl $12, %esp
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl 8(%ecx), %edx
-; X32-NEXT:    movl (%ecx), %ebx
-; X32-NEXT:    movl 4(%ecx), %edi
+; X32-NEXT:    movl 8(%ecx), %edi
+; X32-NEXT:    movl (%ecx), %edx
+; X32-NEXT:    movl 4(%ecx), %ebx
 ; X32-NEXT:    movl 28(%eax), %esi
 ; X32-NEXT:    movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X32-NEXT:    movl 24(%eax), %ebp
-; X32-NEXT:    addl (%eax), %ebx
-; X32-NEXT:    adcl 4(%eax), %edi
-; X32-NEXT:    adcl 8(%eax), %edx
+; X32-NEXT:    addl (%eax), %edx
 ; X32-NEXT:    movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT:    movl 20(%eax), %esi
+; X32-NEXT:    adcl 4(%eax), %ebx
+; X32-NEXT:    adcl 8(%eax), %edi
+; X32-NEXT:    movl %edi, (%esp) # 4-byte Spill
+; X32-NEXT:    movl 20(%eax), %edi
 ; X32-NEXT:    movl 12(%eax), %edx
-; X32-NEXT:    movl 16(%eax), %eax
+; X32-NEXT:    movl 16(%eax), %esi
 ; X32-NEXT:    adcl 12(%ecx), %edx
-; X32-NEXT:    adcl 16(%ecx), %eax
-; X32-NEXT:    adcl 20(%ecx), %esi
-; X32-NEXT:    adcl 24(%ecx), %ebp
-; X32-NEXT:    movl %ebp, (%esp) # 4-byte Spill
+; X32-NEXT:    adcl 16(%ecx), %esi
+; X32-NEXT:    adcl 20(%ecx), %edi
+; X32-NEXT:    movl %ebp, %eax
+; X32-NEXT:    adcl 24(%ecx), %eax
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebp # 4-byte Reload
 ; X32-NEXT:    adcl %ebp, 28(%ecx)
+; X32-NEXT:    movl (%esp), %ebp # 4-byte Reload
+; X32-NEXT:    movl %ebp, 8(%ecx)
+; X32-NEXT:    movl %ebx, 4(%ecx)
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
 ; X32-NEXT:    movl %ebx, (%ecx)
-; X32-NEXT:    movl %edi, 4(%ecx)
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT:    movl %edi, 8(%ecx)
 ; X32-NEXT:    movl %edx, 12(%ecx)
-; X32-NEXT:    movl %eax, 16(%ecx)
-; X32-NEXT:    movl %esi, 20(%ecx)
-; X32-NEXT:    movl (%esp), %eax # 4-byte Reload
+; X32-NEXT:    movl %esi, 16(%ecx)
+; X32-NEXT:    movl %edi, 20(%ecx)
 ; X32-NEXT:    movl %eax, 24(%ecx)
 ; X32-NEXT:    addl $12, %esp
 ; X32-NEXT:    popl %esi
@@ -58,9 +59,9 @@ define void @add(i256* %p, i256* %q) nou
 ; X64-NEXT:    adcq 8(%rsi), %rdx
 ; X64-NEXT:    adcq 16(%rsi), %rax
 ; X64-NEXT:    adcq %r8, 24(%rdi)
-; X64-NEXT:    movq %rcx, (%rdi)
-; X64-NEXT:    movq %rdx, 8(%rdi)
 ; X64-NEXT:    movq %rax, 16(%rdi)
+; X64-NEXT:    movq %rdx, 8(%rdi)
+; X64-NEXT:    movq %rcx, (%rdi)
 ; X64-NEXT:    retq
   %a = load i256, i256* %p
   %b = load i256, i256* %q
@@ -96,9 +97,9 @@ define void @sub(i256* %p, i256* %q) nou
 ; X32-NEXT:    sbbl 24(%esi), %eax
 ; X32-NEXT:    movl 28(%esi), %esi
 ; X32-NEXT:    sbbl %esi, 28(%ecx)
-; X32-NEXT:    movl %ebx, (%ecx)
-; X32-NEXT:    movl %ebp, 4(%ecx)
 ; X32-NEXT:    movl %edi, 8(%ecx)
+; X32-NEXT:    movl %ebp, 4(%ecx)
+; X32-NEXT:    movl %ebx, (%ecx)
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
 ; X32-NEXT:    movl %esi, 12(%ecx)
 ; X32-NEXT:    movl (%esp), %esi # 4-byte Reload
@@ -122,9 +123,9 @@ define void @sub(i256* %p, i256* %q) nou
 ; X64-NEXT:    sbbq 8(%rsi), %rdx
 ; X64-NEXT:    sbbq 16(%rsi), %rax
 ; X64-NEXT:    sbbq %r8, 24(%rdi)
-; X64-NEXT:    movq %rcx, (%rdi)
-; X64-NEXT:    movq %rdx, 8(%rdi)
 ; X64-NEXT:    movq %rax, 16(%rdi)
+; X64-NEXT:    movq %rdx, 8(%rdi)
+; X64-NEXT:    movq %rcx, (%rdi)
 ; X64-NEXT:    retq
   %a = load i256, i256* %p
   %b = load i256, i256* %q

Modified: llvm/trunk/test/CodeGen/X86/merge_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge_store.ll?rev=301187&r1=301186&r2=301187&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge_store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge_store.ll Mon Apr 24 10:37:20 2017
@@ -29,17 +29,8 @@ entry:
   ret void
 }
 
-
-
 ;; CHECK-LABEL: indexed-store-merge
-
-;; We should be able to merge the 4 consecutive stores.  
-;; FIXMECHECK: movl	$0, 2(%rsi,%rdi)
-
-;; CHECK: movb	$0, 2(%rsi,%rdi)
-;; CHECK: movb	$0, 3(%rsi,%rdi)
-;; CHECK: movb	$0, 4(%rsi,%rdi)
-;; CHECK: movb	$0, 5(%rsi,%rdi)
+;; CHECK: movl	$0, 2(%rsi,%rdi)
 ;; CHECK: movb	$0, (%rsi)
 define void @indexed-store-merge(i64 %p, i8* %v) {
 entry:



