[llvm] r228495 - [X86] Force fp stack folding tests to keep to specific domain.

Simon Pilgrim llvm-dev at redking.me.uk
Sat Feb 7 08:14:56 PST 2015


Author: rksimon
Date: Sat Feb  7 10:14:55 2015
New Revision: 228495

URL: http://llvm.org/viewvc/llvm-project?rev=228495&view=rev
Log:
[X86] Force fp stack folding tests to keep to specific domain.

General boolean instructions (AND, ANDN, OR, XOR) need to be forced to a specific execution domain, and not just left to the default — an fadd is appended to each test so the float-domain instruction is selected.

Modified:
    llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll
    llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll

Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll?rev=228495&r1=228494&r2=228495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll Sat Feb  7 10:14:55 2015
@@ -147,7 +147,9 @@ define <4 x float> @stack_fold_andnps(<4
   %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
   %5 = and <2 x i64> %4, %3
   %6 = bitcast <2 x i64> %5 to <4 x float>
-  ret <4 x float> %6
+  ; fadd forces execution domain
+  %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %7
 }
 
 define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -159,7 +161,9 @@ define <8 x float> @stack_fold_andnps_ym
   %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
   %5 = and <4 x i64> %4, %3
   %6 = bitcast <4 x i64> %5 to <8 x float>
-  ret <8 x float> %6
+  ; fadd forces execution domain
+  %7 = fadd <8 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %7
 }
 
 define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
@@ -196,7 +200,9 @@ define <4 x float> @stack_fold_andps(<4
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = and <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -207,7 +213,9 @@ define <8 x float> @stack_fold_andps_ymm
   %3 = bitcast <8 x float> %a1 to <4 x i64>
   %4 = and <4 x i64> %2, %3
   %5 = bitcast <4 x i64> %4 to <8 x float>
-  ret <8 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
 }
 
 define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
@@ -843,7 +851,6 @@ declare <8 x float> @llvm.x86.avx.dp.ps.
 define <4 x float> @stack_fold_extractf128(<8 x float> %a0, <8 x float> %a1) {
   ;CHECK-LABEL: stack_fold_extractf128
   ;CHECK:       vextractf128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
-  ;CHECK:       vmovaps {{-?[0-9]*}}(%rsp), %xmm0 {{.*#+}} 16-byte Reload
   %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   ret <4 x float> %1
@@ -1239,7 +1246,9 @@ define <4 x float> @stack_fold_orps(<4 x
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = or <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -1250,7 +1259,9 @@ define <8 x float> @stack_fold_orps_ymm(
   %3 = bitcast <8 x float> %a1 to <4 x i64>
   %4 = or <4 x i64> %2, %3
   %5 = bitcast <4 x i64> %4 to <8 x float>
-  ret <8 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
 }
 
 define <8 x float> @stack_fold_perm2f128(<8 x float> %a0, <8 x float> %a1) {
@@ -1781,7 +1792,9 @@ define <4 x float> @stack_fold_xorps(<4
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = xor <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -1792,5 +1805,7 @@ define <8 x float> @stack_fold_xorps_ymm
   %3 = bitcast <8 x float> %a1 to <4 x i64>
   %4 = xor <4 x i64> %2, %3
   %5 = bitcast <4 x i64> %4 to <8 x float>
-  ret <8 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
 }

Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll?rev=228495&r1=228494&r2=228495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll (original)
+++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll Sat Feb  7 10:14:55 2015
@@ -99,7 +99,9 @@ define <4 x float> @stack_fold_andnps(<4
   %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
   %5 = and <2 x i64> %4, %3
   %6 = bitcast <2 x i64> %5 to <4 x float>
-  ret <4 x float> %6
+  ; fadd forces execution domain
+  %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %7
 }
 
 define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
@@ -123,7 +125,9 @@ define <4 x float> @stack_fold_andps(<4
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = and <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
@@ -837,7 +841,9 @@ define <4 x float> @stack_fold_orps(<4 x
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = or <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 ; TODO stack_fold_rcpps
@@ -1077,5 +1083,7 @@ define <4 x float> @stack_fold_xorps(<4
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = xor <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }





More information about the llvm-commits mailing list