[llvm] r306820 - Revert "[DAG] Rewrite areNonVolatileConsecutiveLoads to use BaseIndexOffset"

Nirav Dave via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 30 05:56:02 PDT 2017


Author: niravd
Date: Fri Jun 30 05:56:02 2017
New Revision: 306820

URL: http://llvm.org/viewvc/llvm-project?rev=306820&view=rev
Log:
Revert "[DAG] Rewrite areNonVolatileConsecutiveLoads to use BaseIndexOffset"

This reverts commit r306819, which appears to be exposing underlying
issues in a stage1 ppc64be build.

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
    llvm/trunk/test/CodeGen/BPF/undef.ll
    llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll
    llvm/trunk/test/CodeGen/X86/bswap-wide-int.ll
    llvm/trunk/test/CodeGen/X86/build-vector-128.ll
    llvm/trunk/test/CodeGen/X86/build-vector-256.ll
    llvm/trunk/test/CodeGen/X86/build-vector-512.ll
    llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll
    llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Fri Jun 30 05:56:02 2017
@@ -34,7 +34,6 @@
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineValueType.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
-#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
 #include "llvm/CodeGen/ValueTypes.h"
@@ -7631,13 +7630,45 @@ bool SelectionDAG::areNonVolatileConsecu
 
   SDValue Loc = LD->getOperand(1);
   SDValue BaseLoc = Base->getOperand(1);
-
-  auto BaseLocDecomp = BaseIndexOffset::match(BaseLoc, *this);
-  auto LocDecomp = BaseIndexOffset::match(Loc, *this);
-
-  int64_t Offset = 0;
-  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
-    return (Dist * Bytes == Offset);
+  if (Loc.getOpcode() == ISD::FrameIndex) {
+    if (BaseLoc.getOpcode() != ISD::FrameIndex)
+      return false;
+    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
+    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
+    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
+    int FS  = MFI.getObjectSize(FI);
+    int BFS = MFI.getObjectSize(BFI);
+    if (FS != BFS || FS != (int)Bytes) return false;
+    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
+  }
+
+  // Handle X + C.
+  if (isBaseWithConstantOffset(Loc)) {
+    int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
+    if (Loc.getOperand(0) == BaseLoc) {
+      // If the base location is a simple address with no offset itself, then
+      // the second load's first add operand should be the base address.
+      if (LocOffset == Dist * (int)Bytes)
+        return true;
+    } else if (isBaseWithConstantOffset(BaseLoc)) {
+      // The base location itself has an offset, so subtract that value from the
+      // second load's offset before comparing to distance * size.
+      int64_t BOffset =
+        cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
+      if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
+        if ((LocOffset - BOffset) == Dist * (int)Bytes)
+          return true;
+      }
+    }
+  }
+  const GlobalValue *GV1 = nullptr;
+  const GlobalValue *GV2 = nullptr;
+  int64_t Offset1 = 0;
+  int64_t Offset2 = 0;
+  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
+  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
+  if (isGA1 && isGA2 && GV1 == GV2)
+    return Offset1 == (Offset2 + Dist*Bytes);
   return false;
 }
 

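[Editorial note, not part of the patch] The restored "X + C" handling above boils down to decomposing each load address into a shared base plus a constant byte offset and checking that the offsets differ by exactly Dist * Bytes. The standalone C++ sketch below illustrates that arithmetic with hypothetical plain structs rather than SelectionDAG nodes:

    // Standalone illustration (not LLVM code); all names here are hypothetical.
    #include <cassert>
    #include <cstdint>

    // A decomposed address: a base identifier plus a constant byte offset.
    struct DecomposedAddr {
      int BaseId;     // stands in for the SDValue base operand
      int64_t Offset; // constant byte offset folded off the address
    };

    // Loads are consecutive when they share a base and the second one starts
    // exactly Dist elements of Bytes bytes after the first.
    bool areConsecutive(const DecomposedAddr &Loc, const DecomposedAddr &BaseLoc,
                        unsigned Bytes, int Dist) {
      if (Loc.BaseId != BaseLoc.BaseId)
        return false;
      return Loc.Offset - BaseLoc.Offset == int64_t(Dist) * int64_t(Bytes);
    }

    int main() {
      DecomposedAddr BaseLoc{/*BaseId=*/1, /*Offset=*/8};
      DecomposedAddr Loc{/*BaseId=*/1, /*Offset=*/16};
      // A 4-byte load at base+16 is two 4-byte elements past a load at base+8...
      assert(areConsecutive(Loc, BaseLoc, /*Bytes=*/4, /*Dist=*/2));
      // ...but not three elements past it.
      assert(!areConsecutive(Loc, BaseLoc, /*Bytes=*/4, /*Dist=*/3));
      return 0;
    }
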
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp Fri Jun 30 05:56:02 2017
@@ -60,18 +60,12 @@ BaseIndexOffset BaseIndexOffset::match(S
   int64_t Offset = 0;
   bool IsIndexSignExt = false;
 
-  // Consume constant adds & ors with appropriate masking.
-  while (Base->getOpcode() == ISD::ADD || Base->getOpcode() == ISD::OR) {
-    if (auto *C = dyn_cast<ConstantSDNode>(Base->getOperand(1))) {
-      // Only consider ORs which act as adds.
-      if (Base->getOpcode() == ISD::OR &&
-          !DAG.MaskedValueIsZero(Base->getOperand(0), C->getAPIntValue()))
-        break;
-      Offset += C->getSExtValue();
-      Base = Base->getOperand(0);
-      continue;
-    }
-    break;
+  // Consume constant adds
+  while (Base->getOpcode() == ISD::ADD &&
+         isa<ConstantSDNode>(Base->getOperand(1))) {
+    int64_t POffset = cast<ConstantSDNode>(Base->getOperand(1))->getSExtValue();
+    Offset += POffset;
+    Base = Base->getOperand(0);
   }
 
   if (Base->getOpcode() == ISD::ADD) {

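[Editorial note, not part of the patch] One behavioral difference in this hunk: the removed match() loop also folded ISD::OR nodes that act as adds, guarded by MaskedValueIsZero, while the restored loop only folds ISD::ADD with a constant right operand. The standalone C++ sketch below (plain integers, hypothetical helper name) shows when an OR is equivalent to an add: the constant may only set bits that are known to be zero in the base, which is what the MaskedValueIsZero guard checked in SelectionDAG form.

    // Standalone illustration (not LLVM code).
    #include <cassert>
    #include <cstdint>

    // Returns true if (Base | C) == (Base + C), i.e. no bit of C is set in Base.
    bool orActsAsAdd(uint64_t Base, uint64_t C) {
      return (Base & C) == 0;
    }

    int main() {
      uint64_t AlignedBase = 0x1000; // low bits known zero (e.g. 4 KiB aligned)
      assert(orActsAsAdd(AlignedBase, 4));
      assert((AlignedBase | 4) == AlignedBase + 4);

      uint64_t UnalignedBase = 0x1004; // bit 2 already set
      assert(!orActsAsAdd(UnalignedBase, 4));
      assert((UnalignedBase | 4) != UnalignedBase + 4);
      return 0;
    }
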
Modified: llvm/trunk/test/CodeGen/BPF/undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/BPF/undef.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/BPF/undef.ll (original)
+++ llvm/trunk/test/CodeGen/BPF/undef.ll Fri Jun 30 05:56:02 2017
@@ -13,30 +13,36 @@
 
 ; Function Attrs: nounwind uwtable
 define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 section "socket1" {
-; CHECK: r1 = r10
-; CHECK: r1 += -2
-; CHECK: r2 = 0
-; CHECK: *(u16 *)(r1 + 6) = r2
-; CHECK: *(u16 *)(r1 + 4) = r2
-; CHECK: *(u16 *)(r1 + 2) = r2
-; CHECK: r1 = 134678021
-; CHECK: *(u32 *)(r10 - 8) = r1
-; CHECK: r1 = 9
-; CHECK: *(u8 *)(r10 - 4) = r1
-; CHECK: r1 = 10
-; CHECK: *(u8 *)(r10 - 3) = r1
-; CHECK: *(u16 *)(r10 + 24) = r2
-; CHECK: *(u16 *)(r10 + 22) = r2
-; CHECK: *(u16 *)(r10 + 20) = r2
-; CHECK: *(u16 *)(r10 + 18) = r2
-; CHECK: *(u16 *)(r10 + 16) = r2
-; CHECK: *(u16 *)(r10 + 14) = r2
-; CHECK: *(u16 *)(r10 + 12) = r2
-; CHECK: *(u16 *)(r10 + 10) = r2
-; CHECK: *(u16 *)(r10 + 8) = r2
-; CHECK: *(u16 *)(r10 + 6) = r2
-; CHECK: *(u16 *)(r10 - 2) = r2
-; CHECK: *(u16 *)(r10 + 26) = r2
+; CHECK: r2 = r10
+; CHECK: r2 += -2
+; CHECK: r1 = 0
+; CHECK: *(u16 *)(r2 + 6) = r1
+; CHECK: *(u16 *)(r2 + 4) = r1
+; CHECK: *(u16 *)(r2 + 2) = r1
+; CHECK: r2 = 6
+; CHECK: *(u8 *)(r10 - 7) = r2
+; CHECK: r2 = 5
+; CHECK: *(u8 *)(r10 - 8) = r2
+; CHECK: r2 = 7
+; CHECK: *(u8 *)(r10 - 6) = r2
+; CHECK: r2 = 8
+; CHECK: *(u8 *)(r10 - 5) = r2
+; CHECK: r2 = 9
+; CHECK: *(u8 *)(r10 - 4) = r2
+; CHECK: r2 = 10
+; CHECK: *(u8 *)(r10 - 3) = r2
+; CHECK: *(u16 *)(r10 + 24) = r1
+; CHECK: *(u16 *)(r10 + 22) = r1
+; CHECK: *(u16 *)(r10 + 20) = r1
+; CHECK: *(u16 *)(r10 + 18) = r1
+; CHECK: *(u16 *)(r10 + 16) = r1
+; CHECK: *(u16 *)(r10 + 14) = r1
+; CHECK: *(u16 *)(r10 + 12) = r1
+; CHECK: *(u16 *)(r10 + 10) = r1
+; CHECK: *(u16 *)(r10 + 8) = r1
+; CHECK: *(u16 *)(r10 + 6) = r1
+; CHECK: *(u16 *)(r10 - 2) = r1
+; CHECK: *(u16 *)(r10 + 26) = r1
 ; CHECK: r2 = r10
 ; CHECK: r2 += -8
 ; CHECK: r1 = <MCOperand Expr:(routing)>ll

Modified: llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll (original)
+++ llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll Fri Jun 30 05:56:02 2017
@@ -64,6 +64,6 @@ entry:
  %0 = load i16, i16* %retval                          ; <i16> [#uses=1]
  ret i16 %0
 ; CHECK-LABEL: mov2:
-; CHECK-DAG:	mov.w	2(r1), 6(r1)
-; CHECK-DAG:	mov.w	0(r1), 4(r1)
+; CHECK:	mov.w	0(r1), 4(r1)
+; CHECK:	mov.w	2(r1), 6(r1)
 }

Modified: llvm/trunk/test/CodeGen/X86/bswap-wide-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bswap-wide-int.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bswap-wide-int.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bswap-wide-int.ll Fri Jun 30 05:56:02 2017
@@ -71,8 +71,8 @@ define i128 @bswap_i128(i128 %a0) nounwi
 ; X86-MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-MOVBE-NEXT:    movbel %esi, 12(%eax)
 ; X86-MOVBE-NEXT:    movbel %edi, 8(%eax)
-; X86-MOVBE-NEXT:    movbel %edx, 4(%eax)
-; X86-MOVBE-NEXT:    movbel %ecx, (%eax)
+; X86-MOVBE-NEXT:    movbel %ecx, 4(%eax)
+; X86-MOVBE-NEXT:    movbel %edx, (%eax)
 ; X86-MOVBE-NEXT:    popl %esi
 ; X86-MOVBE-NEXT:    popl %edi
 ; X86-MOVBE-NEXT:    retl $4

Modified: llvm/trunk/test/CodeGen/X86/build-vector-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/build-vector-128.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/build-vector-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/build-vector-128.ll Fri Jun 30 05:56:02 2017
@@ -72,10 +72,12 @@ define <4 x float> @test_buildvector_v4f
 }
 
 define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) {
-; SSE-32-LABEL: test_buildvector_v2i64:
-; SSE-32:       # BB#0:
-; SSE-32-NEXT:    movups {{[0-9]+}}(%esp), %xmm0
-; SSE-32-NEXT:    retl
+; SSE2-32-LABEL: test_buildvector_v2i64:
+; SSE2-32:       # BB#0:
+; SSE2-32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-32-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-32-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-32-NEXT:    retl
 ;
 ; SSE-64-LABEL: test_buildvector_v2i64:
 ; SSE-64:       # BB#0:
@@ -84,9 +86,20 @@ define <2 x i64> @test_buildvector_v2i64
 ; SSE-64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-64-NEXT:    retq
 ;
+; SSE41-32-LABEL: test_buildvector_v2i64:
+; SSE41-32:       # BB#0:
+; SSE41-32-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-32-NEXT:    pinsrd $1, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT:    pinsrd $2, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT:    pinsrd $3, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT:    retl
+;
 ; AVX-32-LABEL: test_buildvector_v2i64:
 ; AVX-32:       # BB#0:
-; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %xmm0
+; AVX-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; AVX-32-NEXT:    retl
 ;
 ; AVX-64-LABEL: test_buildvector_v2i64:

Modified: llvm/trunk/test/CodeGen/X86/build-vector-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/build-vector-256.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/build-vector-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/build-vector-256.ll Fri Jun 30 05:56:02 2017
@@ -51,10 +51,18 @@ define <8 x float> @test_buildvector_v8f
 }
 
 define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
-; AVX-32-LABEL: test_buildvector_v4i64:
-; AVX-32:       # BB#0:
-; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0
-; AVX-32-NEXT:    retl
+; AVX1-32-LABEL: test_buildvector_v4i64:
+; AVX1-32:       # BB#0:
+; AVX1-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-32-NEXT:    retl
 ;
 ; AVX1-64-LABEL: test_buildvector_v4i64:
 ; AVX1-64:       # BB#0:
@@ -67,6 +75,19 @@ define <4 x i64> @test_buildvector_v4i64
 ; AVX1-64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-64-NEXT:    retq
 ;
+; AVX2-32-LABEL: test_buildvector_v4i64:
+; AVX2-32:       # BB#0:
+; AVX2-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-32-NEXT:    retl
+;
 ; AVX2-64-LABEL: test_buildvector_v4i64:
 ; AVX2-64:       # BB#0:
 ; AVX2-64-NEXT:    vmovq %rcx, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/build-vector-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/build-vector-512.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/build-vector-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/build-vector-512.ll Fri Jun 30 05:56:02 2017
@@ -79,7 +79,25 @@ define <16 x float> @test_buildvector_v1
 define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) {
 ; AVX-32-LABEL: test_buildvector_v8i64:
 ; AVX-32:       # BB#0:
-; AVX-32-NEXT:    vmovups {{[0-9]+}}(%esp), %zmm0
+; AVX-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-32-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX-32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX-32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX-32-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX-32-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX-32-NEXT:    retl
 ;
 ; AVX-64-LABEL: test_buildvector_v8i64:

Modified: llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll Fri Jun 30 05:56:02 2017
@@ -1063,89 +1063,87 @@ define <32 x i8> @_clearupper32xi8b(<32
 ;
 ; AVX1-LABEL: _clearupper32xi8b:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    pushq %rbp
-; AVX1-NEXT:    pushq %r15
 ; AVX1-NEXT:    pushq %r14
-; AVX1-NEXT:    pushq %r13
-; AVX1-NEXT:    pushq %r12
 ; AVX1-NEXT:    pushq %rbx
-; AVX1-NEXT:    vmovq %xmm0, %rcx
-; AVX1-NEXT:    movq %rcx, %r8
-; AVX1-NEXT:    movq %rcx, %r9
-; AVX1-NEXT:    movq %rcx, %r10
-; AVX1-NEXT:    movq %rcx, %r11
-; AVX1-NEXT:    movq %rcx, %r14
-; AVX1-NEXT:    movq %rcx, %r15
+; AVX1-NEXT:    vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movq -{{[0-9]+}}(%rsp), %r14
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rdx
-; AVX1-NEXT:    movq %rdx, %r12
-; AVX1-NEXT:    movq %rdx, %r13
-; AVX1-NEXT:    movq %rdx, %rbx
-; AVX1-NEXT:    movq %rdx, %rax
-; AVX1-NEXT:    movq %rdx, %rdi
+; AVX1-NEXT:    movq %rdx, %r8
+; AVX1-NEXT:    movq %rdx, %r9
+; AVX1-NEXT:    movq %rdx, %r11
 ; AVX1-NEXT:    movq %rdx, %rsi
-; AVX1-NEXT:    movq %rdx, %rbp
+; AVX1-NEXT:    movq %rdx, %rdi
+; AVX1-NEXT:    movq %rdx, %rcx
+; AVX1-NEXT:    movq %rdx, %rax
 ; AVX1-NEXT:    andb $15, %dl
 ; AVX1-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    movq %rcx, %rdx
+; AVX1-NEXT:    shrq $56, %rax
+; AVX1-NEXT:    andb $15, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movq %r14, %r10
+; AVX1-NEXT:    shrq $48, %rcx
 ; AVX1-NEXT:    andb $15, %cl
 ; AVX1-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $56, %rbp
-; AVX1-NEXT:    andb $15, %bpl
-; AVX1-NEXT:    movb %bpl, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $48, %rsi
+; AVX1-NEXT:    movq %r14, %rdx
+; AVX1-NEXT:    shrq $40, %rdi
+; AVX1-NEXT:    andb $15, %dil
+; AVX1-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movq %r14, %rax
+; AVX1-NEXT:    shrq $32, %rsi
 ; AVX1-NEXT:    andb $15, %sil
 ; AVX1-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $40, %rdi
+; AVX1-NEXT:    movq %r14, %rcx
+; AVX1-NEXT:    shrq $24, %r11
+; AVX1-NEXT:    andb $15, %r11b
+; AVX1-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movq %r14, %rsi
+; AVX1-NEXT:    shrq $16, %r9
+; AVX1-NEXT:    andb $15, %r9b
+; AVX1-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movq %r14, %rdi
+; AVX1-NEXT:    shrq $8, %r8
+; AVX1-NEXT:    andb $15, %r8b
+; AVX1-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movq %r14, %rbx
+; AVX1-NEXT:    andb $15, %r14b
+; AVX1-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    shrq $8, %r10
+; AVX1-NEXT:    shrq $16, %rdx
+; AVX1-NEXT:    shrq $24, %rax
+; AVX1-NEXT:    shrq $32, %rcx
+; AVX1-NEXT:    shrq $40, %rsi
+; AVX1-NEXT:    shrq $48, %rdi
+; AVX1-NEXT:    shrq $56, %rbx
+; AVX1-NEXT:    andb $15, %bl
+; AVX1-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
 ; AVX1-NEXT:    andb $15, %dil
 ; AVX1-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    andb $15, %sil
+; AVX1-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    andb $15, %cl
+; AVX1-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
 ; AVX1-NEXT:    andb $15, %al
 ; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $24, %rbx
-; AVX1-NEXT:    andb $15, %bl
-; AVX1-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $16, %r13
-; AVX1-NEXT:    andb $15, %r13b
-; AVX1-NEXT:    movb %r13b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $8, %r12
-; AVX1-NEXT:    andb $15, %r12b
-; AVX1-NEXT:    movb %r12b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    shrq $8, %r8
-; AVX1-NEXT:    shrq $16, %r9
-; AVX1-NEXT:    shrq $24, %r10
-; AVX1-NEXT:    shrq $32, %r11
-; AVX1-NEXT:    shrq $40, %r14
-; AVX1-NEXT:    shrq $48, %r15
-; AVX1-NEXT:    shrq $56, %rdx
 ; AVX1-NEXT:    andb $15, %dl
 ; AVX1-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    andb $15, %r15b
-; AVX1-NEXT:    movb %r15b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    andb $15, %r14b
-; AVX1-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    andb $15, %r11b
-; AVX1-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
 ; AVX1-NEXT:    andb $15, %r10b
 ; AVX1-NEXT:    movb %r10b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    andb $15, %r9b
-; AVX1-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    andb $15, %r8b
-; AVX1-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
-; AVX1-NEXT:    movq %rax, %rcx
+; AVX1-NEXT:    movq %rax, %r8
 ; AVX1-NEXT:    movq %rax, %rdx
 ; AVX1-NEXT:    movq %rax, %rsi
 ; AVX1-NEXT:    movq %rax, %rdi
-; AVX1-NEXT:    movl %eax, %ebp
 ; AVX1-NEXT:    movl %eax, %ebx
+; AVX1-NEXT:    movl %eax, %ecx
 ; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    shrl $8, %eax
 ; AVX1-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    shrl $16, %ebx
-; AVX1-NEXT:    vpinsrb $2, %ebx, %xmm1, %xmm1
-; AVX1-NEXT:    shrl $24, %ebp
-; AVX1-NEXT:    vpinsrb $3, %ebp, %xmm1, %xmm1
+; AVX1-NEXT:    shrl $16, %ecx
+; AVX1-NEXT:    vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX1-NEXT:    shrl $24, %ebx
+; AVX1-NEXT:    vpinsrb $3, %ebx, %xmm1, %xmm1
 ; AVX1-NEXT:    shrq $32, %rdi
 ; AVX1-NEXT:    vpinsrb $4, %edi, %xmm1, %xmm1
 ; AVX1-NEXT:    shrq $40, %rsi
@@ -1155,8 +1153,8 @@ define <32 x i8> @_clearupper32xi8b(<32
 ; AVX1-NEXT:    shrq $48, %rdx
 ; AVX1-NEXT:    vpinsrb $6, %edx, %xmm1, %xmm1
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    shrq $56, %rcx
-; AVX1-NEXT:    vpinsrb $7, %ecx, %xmm1, %xmm0
+; AVX1-NEXT:    shrq $56, %r8
+; AVX1-NEXT:    vpinsrb $7, %r8d, %xmm1, %xmm0
 ; AVX1-NEXT:    movl %eax, %ecx
 ; AVX1-NEXT:    shrl $8, %ecx
 ; AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
@@ -1224,98 +1222,92 @@ define <32 x i8> @_clearupper32xi8b(<32
 ; AVX1-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    popq %rbx
-; AVX1-NEXT:    popq %r12
-; AVX1-NEXT:    popq %r13
 ; AVX1-NEXT:    popq %r14
-; AVX1-NEXT:    popq %r15
-; AVX1-NEXT:    popq %rbp
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: _clearupper32xi8b:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    pushq %rbp
-; AVX2-NEXT:    pushq %r15
 ; AVX2-NEXT:    pushq %r14
-; AVX2-NEXT:    pushq %r13
-; AVX2-NEXT:    pushq %r12
 ; AVX2-NEXT:    pushq %rbx
-; AVX2-NEXT:    vmovq %xmm0, %rcx
-; AVX2-NEXT:    movq %rcx, %r8
-; AVX2-NEXT:    movq %rcx, %r9
-; AVX2-NEXT:    movq %rcx, %r10
-; AVX2-NEXT:    movq %rcx, %r11
-; AVX2-NEXT:    movq %rcx, %r14
-; AVX2-NEXT:    movq %rcx, %r15
+; AVX2-NEXT:    vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovq %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movq -{{[0-9]+}}(%rsp), %r14
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rdx
-; AVX2-NEXT:    movq %rdx, %r12
-; AVX2-NEXT:    movq %rdx, %r13
-; AVX2-NEXT:    movq %rdx, %rbx
-; AVX2-NEXT:    movq %rdx, %rax
-; AVX2-NEXT:    movq %rdx, %rdi
+; AVX2-NEXT:    movq %rdx, %r8
+; AVX2-NEXT:    movq %rdx, %r9
+; AVX2-NEXT:    movq %rdx, %r11
 ; AVX2-NEXT:    movq %rdx, %rsi
-; AVX2-NEXT:    movq %rdx, %rbp
+; AVX2-NEXT:    movq %rdx, %rdi
+; AVX2-NEXT:    movq %rdx, %rcx
+; AVX2-NEXT:    movq %rdx, %rax
 ; AVX2-NEXT:    andb $15, %dl
 ; AVX2-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    movq %rcx, %rdx
+; AVX2-NEXT:    shrq $56, %rax
+; AVX2-NEXT:    andb $15, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movq %r14, %r10
+; AVX2-NEXT:    shrq $48, %rcx
 ; AVX2-NEXT:    andb $15, %cl
 ; AVX2-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $56, %rbp
-; AVX2-NEXT:    andb $15, %bpl
-; AVX2-NEXT:    movb %bpl, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $48, %rsi
+; AVX2-NEXT:    movq %r14, %rdx
+; AVX2-NEXT:    shrq $40, %rdi
+; AVX2-NEXT:    andb $15, %dil
+; AVX2-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movq %r14, %rax
+; AVX2-NEXT:    shrq $32, %rsi
 ; AVX2-NEXT:    andb $15, %sil
 ; AVX2-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $40, %rdi
+; AVX2-NEXT:    movq %r14, %rcx
+; AVX2-NEXT:    shrq $24, %r11
+; AVX2-NEXT:    andb $15, %r11b
+; AVX2-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movq %r14, %rsi
+; AVX2-NEXT:    shrq $16, %r9
+; AVX2-NEXT:    andb $15, %r9b
+; AVX2-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movq %r14, %rdi
+; AVX2-NEXT:    shrq $8, %r8
+; AVX2-NEXT:    andb $15, %r8b
+; AVX2-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movq %r14, %rbx
+; AVX2-NEXT:    andb $15, %r14b
+; AVX2-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    shrq $8, %r10
+; AVX2-NEXT:    shrq $16, %rdx
+; AVX2-NEXT:    shrq $24, %rax
+; AVX2-NEXT:    shrq $32, %rcx
+; AVX2-NEXT:    shrq $40, %rsi
+; AVX2-NEXT:    shrq $48, %rdi
+; AVX2-NEXT:    shrq $56, %rbx
+; AVX2-NEXT:    andb $15, %bl
+; AVX2-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
 ; AVX2-NEXT:    andb $15, %dil
 ; AVX2-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    andb $15, %sil
+; AVX2-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    andb $15, %cl
+; AVX2-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
 ; AVX2-NEXT:    andb $15, %al
 ; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $24, %rbx
-; AVX2-NEXT:    andb $15, %bl
-; AVX2-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $16, %r13
-; AVX2-NEXT:    andb $15, %r13b
-; AVX2-NEXT:    movb %r13b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $8, %r12
-; AVX2-NEXT:    andb $15, %r12b
-; AVX2-NEXT:    movb %r12b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    shrq $8, %r8
-; AVX2-NEXT:    shrq $16, %r9
-; AVX2-NEXT:    shrq $24, %r10
-; AVX2-NEXT:    shrq $32, %r11
-; AVX2-NEXT:    shrq $40, %r14
-; AVX2-NEXT:    shrq $48, %r15
-; AVX2-NEXT:    shrq $56, %rdx
 ; AVX2-NEXT:    andb $15, %dl
 ; AVX2-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    andb $15, %r15b
-; AVX2-NEXT:    movb %r15b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    andb $15, %r14b
-; AVX2-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    andb $15, %r11b
-; AVX2-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
 ; AVX2-NEXT:    andb $15, %r10b
 ; AVX2-NEXT:    movb %r10b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    andb $15, %r9b
-; AVX2-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    andb $15, %r8b
-; AVX2-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT:    vmovq %xmm0, %rax
-; AVX2-NEXT:    movq %rax, %rcx
+; AVX2-NEXT:    movq %rax, %r8
 ; AVX2-NEXT:    movq %rax, %rdx
 ; AVX2-NEXT:    movq %rax, %rsi
 ; AVX2-NEXT:    movq %rax, %rdi
-; AVX2-NEXT:    movl %eax, %ebp
 ; AVX2-NEXT:    movl %eax, %ebx
+; AVX2-NEXT:    movl %eax, %ecx
 ; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    shrl $8, %eax
 ; AVX2-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $16, %ebx
-; AVX2-NEXT:    vpinsrb $2, %ebx, %xmm1, %xmm1
-; AVX2-NEXT:    shrl $24, %ebp
-; AVX2-NEXT:    vpinsrb $3, %ebp, %xmm1, %xmm1
+; AVX2-NEXT:    shrl $16, %ecx
+; AVX2-NEXT:    vpinsrb $2, %ecx, %xmm1, %xmm1
+; AVX2-NEXT:    shrl $24, %ebx
+; AVX2-NEXT:    vpinsrb $3, %ebx, %xmm1, %xmm1
 ; AVX2-NEXT:    shrq $32, %rdi
 ; AVX2-NEXT:    vpinsrb $4, %edi, %xmm1, %xmm1
 ; AVX2-NEXT:    shrq $40, %rsi
@@ -1325,8 +1317,8 @@ define <32 x i8> @_clearupper32xi8b(<32
 ; AVX2-NEXT:    shrq $48, %rdx
 ; AVX2-NEXT:    vpinsrb $6, %edx, %xmm1, %xmm1
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    shrq $56, %rcx
-; AVX2-NEXT:    vpinsrb $7, %ecx, %xmm1, %xmm0
+; AVX2-NEXT:    shrq $56, %r8
+; AVX2-NEXT:    vpinsrb $7, %r8d, %xmm1, %xmm0
 ; AVX2-NEXT:    movl %eax, %ecx
 ; AVX2-NEXT:    shrl $8, %ecx
 ; AVX2-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0
@@ -1394,11 +1386,7 @@ define <32 x i8> @_clearupper32xi8b(<32
 ; AVX2-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    popq %rbx
-; AVX2-NEXT:    popq %r12
-; AVX2-NEXT:    popq %r13
 ; AVX2-NEXT:    popq %r14
-; AVX2-NEXT:    popq %r15
-; AVX2-NEXT:    popq %rbp
 ; AVX2-NEXT:    retq
   %x4  = bitcast <32 x i8> %0 to <64 x i4>
   %r0  = insertelement <64 x i4> %x4,  i4 zeroinitializer, i32 1

Modified: llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll?rev=306820&r1=306819&r2=306820&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/wide-integer-cmp.ll Fri Jun 30 05:56:02 2017
@@ -101,8 +101,8 @@ define i32 @test_wide(i128 %a, i128 %b)
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    jge .LBB4_2
 ; CHECK-NEXT:  # BB#1: # %bb1
 ; CHECK-NEXT:    movl $1, %eax



