[llvm] r224900 - [X86] Add missing memory variants to AVX false dependency breaking

Michael Kuperstein michael.m.kuperstein at intel.com
Sun Dec 28 05:15:05 PST 2014


Author: mkuper
Date: Sun Dec 28 07:15:05 2014
New Revision: 224900

URL: http://llvm.org/viewvc/llvm-project?rev=224900&view=rev
Log:
[X86] Add missing memory variants to AVX false dependency breaking

Adds missing memory instruction variants to AVX false dependency breaking handling. (SSE was handled in r224246)

Differential Revision: http://reviews.llvm.org/D6780

Added:
    llvm/trunk/test/CodeGen/X86/break-false-dep.ll
      - copied, changed from r224755, llvm/trunk/test/CodeGen/X86/break-sse-dep.ll
Removed:
    llvm/trunk/test/CodeGen/X86/break-avx-dep.ll
    llvm/trunk/test/CodeGen/X86/break-sse-dep.ll
Modified:
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=224900&r1=224899&r2=224900&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Sun Dec 28 07:15:05 2014
@@ -4554,28 +4554,52 @@ getPartialRegUpdateClearance(const Machi
 static bool hasUndefRegUpdate(unsigned Opcode) {
   switch (Opcode) {
   case X86::VCVTSI2SSrr:
+  case X86::VCVTSI2SSrm:
   case X86::Int_VCVTSI2SSrr:
+  case X86::Int_VCVTSI2SSrm:
   case X86::VCVTSI2SS64rr:
+  case X86::VCVTSI2SS64rm:
   case X86::Int_VCVTSI2SS64rr:
+  case X86::Int_VCVTSI2SS64rm:
   case X86::VCVTSI2SDrr:
+  case X86::VCVTSI2SDrm:
   case X86::Int_VCVTSI2SDrr:
+  case X86::Int_VCVTSI2SDrm:
   case X86::VCVTSI2SD64rr:
+  case X86::VCVTSI2SD64rm:
   case X86::Int_VCVTSI2SD64rr:
+  case X86::Int_VCVTSI2SD64rm:
   case X86::VCVTSD2SSrr:
+  case X86::VCVTSD2SSrm:
   case X86::Int_VCVTSD2SSrr:
+  case X86::Int_VCVTSD2SSrm:
   case X86::VCVTSS2SDrr:
+  case X86::VCVTSS2SDrm:
   case X86::Int_VCVTSS2SDrr:
+  case X86::Int_VCVTSS2SDrm:
   case X86::VRCPSSr:
+  case X86::VRCPSSm:
+  case X86::VRCPSSm_Int:
   case X86::VROUNDSDr:
+  case X86::VROUNDSDm:
   case X86::VROUNDSDr_Int:
   case X86::VROUNDSSr:
+  case X86::VROUNDSSm:
   case X86::VROUNDSSr_Int:
   case X86::VRSQRTSSr:
+  case X86::VRSQRTSSm:
+  case X86::VRSQRTSSm_Int:
   case X86::VSQRTSSr:
-
-  // AVX-512
+  case X86::VSQRTSSm:
+  case X86::VSQRTSSm_Int:
+  case X86::VSQRTSDr:
+  case X86::VSQRTSDm:
+  case X86::VSQRTSDm_Int:
+    // AVX-512
   case X86::VCVTSD2SSZrr:
+  case X86::VCVTSD2SSZrm:
   case X86::VCVTSS2SDZrr:
+  case X86::VCVTSS2SDZrm:
     return true;
   }
 

Removed: llvm/trunk/test/CodeGen/X86/break-avx-dep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/break-avx-dep.ll?rev=224899&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/break-avx-dep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/break-avx-dep.ll (removed)
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
-;
-; rdar:15221834 False AVX register dependencies cause 5x slowdown on
-; flops-6. Make sure the unused register read by vcvtsi2sdq is zeroed
-; to avoid cyclic dependence on a write to the same register in a
-; previous iteration.
-
-; CHECK-LABEL: t1:
-; CHECK-LABEL: %loop
-; CHECK: vxorps %[[REG:xmm.]], %{{xmm.}}, %{{xmm.}}
-; CHECK: vcvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
-define i64 @t1(i64* nocapture %x, double* nocapture %y) nounwind {
-entry:
-  %vx = load i64* %x
-  br label %loop
-loop:
-  %i = phi i64 [ 1, %entry ], [ %inc, %loop ]
-  %s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
-  %fi = sitofp i64 %i to double
-  %vy = load double* %y
-  %fipy = fadd double %fi, %vy
-  %iipy = fptosi double %fipy to i64
-  %s2 = add i64 %s1, %iipy
-  %inc = add nsw i64 %i, 1
-  %exitcond = icmp eq i64 %inc, 156250000
-  br i1 %exitcond, label %ret, label %loop
-ret:
-  ret i64 %s2
-}

Copied: llvm/trunk/test/CodeGen/X86/break-false-dep.ll (from r224755, llvm/trunk/test/CodeGen/X86/break-sse-dep.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/break-false-dep.ll?p2=llvm/trunk/test/CodeGen/X86/break-false-dep.ll&p1=llvm/trunk/test/CodeGen/X86/break-sse-dep.ll&r1=224755&r2=224900&rev=224900&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/break-sse-dep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/break-false-dep.ll Sun Dec 28 07:15:05 2014
@@ -1,11 +1,12 @@
-; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -mcpu=nehalem | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 -mcpu=nehalem | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 -mcpu=nehalem | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
 
 define double @t1(float* nocapture %x) nounwind readonly ssp {
 entry:
-; CHECK-LABEL: t1:
-; CHECK: movss ([[A0:%rdi|%rcx]]), %xmm0
-; CHECK: cvtss2sd %xmm0, %xmm0
+; SSE-LABEL: t1:
+; SSE: movss ([[A0:%rdi|%rcx]]), %xmm0
+; SSE: cvtss2sd %xmm0, %xmm0
 
   %0 = load float* %x, align 4
   %1 = fpext float %0 to double
@@ -14,8 +15,8 @@ entry:
 
 define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
 entry:
-; CHECK-LABEL: t2:
-; CHECK: cvtsd2ss ([[A0]]), %xmm0
+; SSE-LABEL: t2:
+; SSE: cvtsd2ss ([[A0]]), %xmm0
   %0 = load double* %x, align 8
   %1 = fptrunc double %0 to float
   ret float %1
@@ -23,9 +24,9 @@ entry:
 
 define float @squirtf(float* %x) nounwind {
 entry:
-; CHECK-LABEL: squirtf:
-; CHECK: movss ([[A0]]), %xmm0
-; CHECK: sqrtss %xmm0, %xmm0
+; SSE-LABEL: squirtf:
+; SSE: movss ([[A0]]), %xmm0
+; SSE: sqrtss %xmm0, %xmm0
   %z = load float* %x
   %t = call float @llvm.sqrt.f32(float %z)
   ret float %t
@@ -33,9 +34,9 @@ entry:
 
 define double @squirt(double* %x) nounwind {
 entry:
-; CHECK-LABEL: squirt:
-; CHECK: movsd ([[A0]]), %xmm0
-; CHECK: sqrtsd %xmm0, %xmm0
+; SSE-LABEL: squirt:
+; SSE: movsd ([[A0]]), %xmm0
+; SSE: sqrtsd %xmm0, %xmm0
   %z = load double* %x
   %t = call double @llvm.sqrt.f64(double %z)
   ret double %t
@@ -43,8 +44,8 @@ entry:
 
 define float @squirtf_size(float* %x) nounwind optsize {
 entry:
-; CHECK-LABEL: squirtf_size:
-; CHECK: sqrtss ([[A0]]), %xmm0
+; SSE-LABEL: squirtf_size:
+; SSE: sqrtss ([[A0]]), %xmm0
   %z = load float* %x
   %t = call float @llvm.sqrt.f32(float %z)
   ret float %t
@@ -52,8 +53,8 @@ entry:
 
 define double @squirt_size(double* %x) nounwind optsize {
 entry:
-; CHECK-LABEL: squirt_size:
-; CHECK: sqrtsd ([[A0]]), %xmm0
+; SSE-LABEL: squirt_size:
+; SSE: sqrtsd ([[A0]]), %xmm0
   %z = load double* %x
   %t = call double @llvm.sqrt.f64(double %z)
   ret double %t
@@ -62,8 +63,8 @@ entry:
 declare float @llvm.sqrt.f32(float)
 declare double @llvm.sqrt.f64(double)
 
-; CHECK-LABEL: loopdep1
-; CHECK: for.body
+; SSE-LABEL: loopdep1
+; SSE: for.body
 ;
 ; This loop contains two cvtsi2ss instructions that update the same xmm
 ; register.  Verify that the execution dependency fix pass breaks those
@@ -71,12 +72,12 @@ declare double @llvm.sqrt.f64(double)
 ;
 ; If the register allocator chooses different registers for the two cvtsi2ss
 ; instructions, they are still dependent on themselves.
-; CHECK: xorps [[XMM1:%xmm[0-9]+]]
-; CHECK: , [[XMM1]]
-; CHECK: cvtsi2ssl %{{.*}}, [[XMM1]]
-; CHECK: xorps [[XMM2:%xmm[0-9]+]]
-; CHECK: , [[XMM2]]
-; CHECK: cvtsi2ssl %{{.*}}, [[XMM2]]
+; SSE: xorps [[XMM1:%xmm[0-9]+]]
+; SSE: , [[XMM1]]
+; SSE: cvtsi2ssl %{{.*}}, [[XMM1]]
+; SSE: xorps [[XMM2:%xmm[0-9]+]]
+; SSE: , [[XMM2]]
+; SSE: cvtsi2ssl %{{.*}}, [[XMM2]]
 ;
 define float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
 entry:
@@ -104,6 +105,38 @@ for.end:
   ret float %sub
 }
 
+; rdar:15221834 False AVX register dependencies cause 5x slowdown on
+; flops-6. Make sure the unused register read by vcvtsi2sdq is zeroed
+; to avoid cyclic dependence on a write to the same register in a
+; previous iteration.
+
+; AVX-LABEL: loopdep2:
+; AVX-LABEL: %loop
+; AVX: vxorps %[[REG:xmm.]], %{{xmm.}}, %{{xmm.}}
+; AVX: vcvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]], %{{xmm.}}
+; SSE-LABEL: loopdep2:
+; SSE-LABEL: %loop
+; SSE: xorps %[[REG:xmm.]], %[[REG]]
+; SSE: cvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]]
+define i64 @loopdep2(i64* nocapture %x, double* nocapture %y) nounwind {
+entry:
+  %vx = load i64* %x
+  br label %loop
+loop:
+  %i = phi i64 [ 1, %entry ], [ %inc, %loop ]
+  %s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
+  %fi = sitofp i64 %i to double
+  %vy = load double* %y
+  %fipy = fadd double %fi, %vy
+  %iipy = fptosi double %fipy to i64
+  %s2 = add i64 %s1, %iipy
+  %inc = add nsw i64 %i, 1
+  %exitcond = icmp eq i64 %inc, 156250000
+  br i1 %exitcond, label %ret, label %loop
+ret:
+  ret i64 %s2
+}
+
 ; This loop contains a cvtsi2sd instruction that has a loop-carried
 ; false dependency on an xmm that is modified by other scalar instructions
 ; that follow it in the loop. Additionally, the source of convert is a 
@@ -115,7 +148,7 @@ for.end:
 @w = common global [1024 x double] zeroinitializer, align 16
 @v = common global [1024 x i32] zeroinitializer, align 16
 
-define void @loopdep2() {
+define void @loopdep3() {
 entry:
   br label %for.cond1.preheader
 
@@ -151,11 +184,18 @@ for.inc14:
 for.end16:                                        ; preds = %for.inc14
   ret void
 
-;CHECK-LABEL:@loopdep2
-;CHECK: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
-;CHECK-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
-;CHECK-NEXT: mulsd {{.*}}, [[XMM0]]
-;CHECK-NEXT: mulsd {{.*}}, [[XMM0]]
-;CHECK-NEXT: mulsd {{.*}}, [[XMM0]]
-;CHECK-NEXT: movsd [[XMM0]],
+;SSE-LABEL:@loopdep3
+;SSE: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
+;SSE-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
+;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
+;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
+;SSE-NEXT: mulsd {{.*}}, [[XMM0]]
+;SSE-NEXT: movsd [[XMM0]],
+;AVX-LABEL:@loopdep3
+;AVX: vxorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
+;AVX-NEXT: vcvtsi2sdl {{.*}}, [[XMM0]], [[XMM0]]
+;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
+;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
+;AVX-NEXT: vmulsd {{.*}}, [[XMM0]], [[XMM0]]
+;AVX-NEXT: vmovsd [[XMM0]],
 }

Removed: llvm/trunk/test/CodeGen/X86/break-sse-dep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/break-sse-dep.ll?rev=224899&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/break-sse-dep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/break-sse-dep.ll (removed)
@@ -1,161 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -mcpu=nehalem | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 -mcpu=nehalem | FileCheck %s
-
-define double @t1(float* nocapture %x) nounwind readonly ssp {
-entry:
-; CHECK-LABEL: t1:
-; CHECK: movss ([[A0:%rdi|%rcx]]), %xmm0
-; CHECK: cvtss2sd %xmm0, %xmm0
-
-  %0 = load float* %x, align 4
-  %1 = fpext float %0 to double
-  ret double %1
-}
-
-define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
-entry:
-; CHECK-LABEL: t2:
-; CHECK: cvtsd2ss ([[A0]]), %xmm0
-  %0 = load double* %x, align 8
-  %1 = fptrunc double %0 to float
-  ret float %1
-}
-
-define float @squirtf(float* %x) nounwind {
-entry:
-; CHECK-LABEL: squirtf:
-; CHECK: movss ([[A0]]), %xmm0
-; CHECK: sqrtss %xmm0, %xmm0
-  %z = load float* %x
-  %t = call float @llvm.sqrt.f32(float %z)
-  ret float %t
-}
-
-define double @squirt(double* %x) nounwind {
-entry:
-; CHECK-LABEL: squirt:
-; CHECK: movsd ([[A0]]), %xmm0
-; CHECK: sqrtsd %xmm0, %xmm0
-  %z = load double* %x
-  %t = call double @llvm.sqrt.f64(double %z)
-  ret double %t
-}
-
-define float @squirtf_size(float* %x) nounwind optsize {
-entry:
-; CHECK-LABEL: squirtf_size:
-; CHECK: sqrtss ([[A0]]), %xmm0
-  %z = load float* %x
-  %t = call float @llvm.sqrt.f32(float %z)
-  ret float %t
-}
-
-define double @squirt_size(double* %x) nounwind optsize {
-entry:
-; CHECK-LABEL: squirt_size:
-; CHECK: sqrtsd ([[A0]]), %xmm0
-  %z = load double* %x
-  %t = call double @llvm.sqrt.f64(double %z)
-  ret double %t
-}
-
-declare float @llvm.sqrt.f32(float)
-declare double @llvm.sqrt.f64(double)
-
-; CHECK-LABEL: loopdep1
-; CHECK: for.body
-;
-; This loop contains two cvtsi2ss instructions that update the same xmm
-; register.  Verify that the execution dependency fix pass breaks those
-; dependencies by inserting xorps instructions.
-;
-; If the register allocator chooses different registers for the two cvtsi2ss
-; instructions, they are still dependent on themselves.
-; CHECK: xorps [[XMM1:%xmm[0-9]+]]
-; CHECK: , [[XMM1]]
-; CHECK: cvtsi2ssl %{{.*}}, [[XMM1]]
-; CHECK: xorps [[XMM2:%xmm[0-9]+]]
-; CHECK: , [[XMM2]]
-; CHECK: cvtsi2ssl %{{.*}}, [[XMM2]]
-;
-define float @loopdep1(i32 %m) nounwind uwtable readnone ssp {
-entry:
-  %tobool3 = icmp eq i32 %m, 0
-  br i1 %tobool3, label %for.end, label %for.body
-
-for.body:                                         ; preds = %entry, %for.body
-  %m.addr.07 = phi i32 [ %dec, %for.body ], [ %m, %entry ]
-  %s1.06 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
-  %s2.05 = phi float [ %add2, %for.body ], [ 0.000000e+00, %entry ]
-  %n.04 = phi i32 [ %inc, %for.body ], [ 1, %entry ]
-  %conv = sitofp i32 %n.04 to float
-  %add = fadd float %s1.06, %conv
-  %conv1 = sitofp i32 %m.addr.07 to float
-  %add2 = fadd float %s2.05, %conv1
-  %inc = add nsw i32 %n.04, 1
-  %dec = add nsw i32 %m.addr.07, -1
-  %tobool = icmp eq i32 %dec, 0
-  br i1 %tobool, label %for.end, label %for.body
-
-for.end:                                          ; preds = %for.body, %entry
-  %s1.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
-  %s2.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add2, %for.body ]
-  %sub = fsub float %s1.0.lcssa, %s2.0.lcssa
-  ret float %sub
-}
-
-; This loop contains a cvtsi2sd instruction that has a loop-carried
-; false dependency on an xmm that is modified by other scalar instructions
-; that follow it in the loop. Additionally, the source of convert is a 
-; memory operand. Verify the execution dependency fix pass breaks this
-; dependency by inserting a xor before the convert.
-@x = common global [1024 x double] zeroinitializer, align 16
-@y = common global [1024 x double] zeroinitializer, align 16
-@z = common global [1024 x double] zeroinitializer, align 16
-@w = common global [1024 x double] zeroinitializer, align 16
-@v = common global [1024 x i32] zeroinitializer, align 16
-
-define void @loopdep2() {
-entry:
-  br label %for.cond1.preheader
-
-for.cond1.preheader:                              ; preds = %for.inc14, %entry
-  %i.025 = phi i32 [ 0, %entry ], [ %inc15, %for.inc14 ]
-  br label %for.body3
-
-for.body3:
-  %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
-  %arrayidx = getelementptr inbounds [1024 x i32]* @v, i64 0, i64 %indvars.iv
-  %0 = load i32* %arrayidx, align 4
-  %conv = sitofp i32 %0 to double
-  %arrayidx5 = getelementptr inbounds [1024 x double]* @x, i64 0, i64 %indvars.iv
-  %1 = load double* %arrayidx5, align 8
-  %mul = fmul double %conv, %1
-  %arrayidx7 = getelementptr inbounds [1024 x double]* @y, i64 0, i64 %indvars.iv
-  %2 = load double* %arrayidx7, align 8
-  %mul8 = fmul double %mul, %2
-  %arrayidx10 = getelementptr inbounds [1024 x double]* @z, i64 0, i64 %indvars.iv
-  %3 = load double* %arrayidx10, align 8
-  %mul11 = fmul double %mul8, %3
-  %arrayidx13 = getelementptr inbounds [1024 x double]* @w, i64 0, i64 %indvars.iv
-  store double %mul11, double* %arrayidx13, align 8
-  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %exitcond = icmp eq i64 %indvars.iv.next, 1024
-  br i1 %exitcond, label %for.inc14, label %for.body3
-
-for.inc14:                                        ; preds = %for.body3
-  %inc15 = add nsw i32 %i.025, 1
-  %exitcond26 = icmp eq i32 %inc15, 100000
-  br i1 %exitcond26, label %for.end16, label %for.cond1.preheader
-
-for.end16:                                        ; preds = %for.inc14
-  ret void
-
-;CHECK-LABEL:@loopdep2
-;CHECK: xorps [[XMM0:%xmm[0-9]+]], [[XMM0]]
-;CHECK-NEXT: cvtsi2sdl {{.*}}, [[XMM0]]
-;CHECK-NEXT: mulsd {{.*}}, [[XMM0]]
-;CHECK-NEXT: mulsd {{.*}}, [[XMM0]]
-;CHECK-NEXT: mulsd {{.*}}, [[XMM0]]
-;CHECK-NEXT: movsd [[XMM0]],
-}





More information about the llvm-commits mailing list