[llvm] r283072 - [x86] remove 'nan' strings from copysign assertions; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 2 10:07:24 PDT 2016


Author: spatel
Date: Sun Oct  2 12:07:24 2016
New Revision: 283072

URL: http://llvm.org/viewvc/llvm-project?rev=283072&view=rev
Log:
[x86] remove 'nan' strings from copysign assertions; NFC

Preemptively scrubbing these to avoid a bot fail as in PR30443:
https://llvm.org/bugs/show_bug.cgi?id=30443

I'm nearly done with a patch to fix these cases, so I'm not trying very
hard to do better for this temporary workaround.

I plan to use better checks than what the script produces for the vectorized cases.
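
For context, the test defines small functions that call the copysign
intrinsic; a minimal sketch of that shape (the exact bodies checked in to
vec-copysign.ll may differ) is:

; sketch only -- the checked-in test bodies may differ
define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) {
  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
  ret <4 x float> %r
}
declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)

The x86 lowering ANDs one operand with a sign mask (-0.0 per lane) and the
other with the complementary magnitude mask (0x7fffffff per lane), then ORs
the results. The magnitude mask is a NaN bit pattern, so the asm printer
renders it as [nan,nan,nan,nan], and that text can vary by host (as in
PR30443), which is why the scripted check lines below drop everything after
the register name.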

Modified:
    llvm/trunk/test/CodeGen/X86/vec-copysign.ll

Modified: llvm/trunk/test/CodeGen/X86/vec-copysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec-copysign.ll?rev=283072&r1=283071&r2=283072&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec-copysign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec-copysign.ll Sun Oct  2 12:07:24 2016
@@ -13,7 +13,7 @@ define <4 x float> @v4f32(<4 x float> %a
 ; SSE2-NEXT:    andps %xmm3, %xmm2
 ; SSE2-NEXT:    movaps %xmm0, %xmm4
 ; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,1,2,3]
-; SSE2-NEXT:    movaps {{.*#+}} xmm5 = [nan,nan,nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm5 
 ; SSE2-NEXT:    andps %xmm5, %xmm4
 ; SSE2-NEXT:    orps %xmm2, %xmm4
 ; SSE2-NEXT:    movaps %xmm1, %xmm2
@@ -43,7 +43,7 @@ define <4 x float> @v4f32(<4 x float> %a
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm3
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,nan,nan,nan]
+; AVX-NEXT:    vmovaps {{.*#+}} xmm4
 ; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm5
 ; AVX-NEXT:    vorps %xmm3, %xmm5, %xmm3
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
@@ -80,7 +80,7 @@ define <8 x float> @v8f32(<8 x float> %a
 ; SSE2-NEXT:    andps %xmm8, %xmm0
 ; SSE2-NEXT:    movaps %xmm5, %xmm7
 ; SSE2-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,1,2,3]
-; SSE2-NEXT:    movaps {{.*#+}} xmm6 = [nan,nan,nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm6
 ; SSE2-NEXT:    andps %xmm6, %xmm7
 ; SSE2-NEXT:    orps %xmm0, %xmm7
 ; SSE2-NEXT:    movaps %xmm2, %xmm0
@@ -139,7 +139,7 @@ define <8 x float> @v8f32(<8 x float> %a
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandps %xmm2, %xmm4, %xmm5
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX-NEXT:    vmovaps {{.*#+}} xmm3 = [nan,nan,nan,nan]
+; AVX-NEXT:    vmovaps {{.*#+}} xmm3
 ; AVX-NEXT:    vandps %xmm3, %xmm6, %xmm7
 ; AVX-NEXT:    vorps %xmm5, %xmm7, %xmm8
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm4[1,1,3,3]
@@ -194,7 +194,7 @@ define <2 x double> @v2f64(<2 x double>
 ; SSE2-NEXT:    movaps {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
 ; SSE2-NEXT:    movaps %xmm1, %xmm4
 ; SSE2-NEXT:    andps %xmm3, %xmm4
-; SSE2-NEXT:    movaps {{.*#+}} xmm5 = [nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm5
 ; SSE2-NEXT:    movaps %xmm0, %xmm2
 ; SSE2-NEXT:    andps %xmm5, %xmm2
 ; SSE2-NEXT:    orps %xmm4, %xmm2
@@ -211,7 +211,7 @@ define <2 x double> @v2f64(<2 x double>
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovapd {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandpd %xmm2, %xmm1, %xmm3
-; AVX-NEXT:    vmovapd {{.*#+}} xmm4 = [nan,nan]
+; AVX-NEXT:    vmovapd {{.*#+}} xmm4
 ; AVX-NEXT:    vandpd %xmm4, %xmm0, %xmm5
 ; AVX-NEXT:    vorpd %xmm3, %xmm5, %xmm3
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
@@ -233,7 +233,7 @@ define <4 x double> @v4f64(<4 x double>
 ; SSE2-NEXT:    movaps {{.*#+}} xmm5 = [-0.000000e+00,-0.000000e+00]
 ; SSE2-NEXT:    movaps %xmm2, %xmm6
 ; SSE2-NEXT:    andps %xmm5, %xmm6
-; SSE2-NEXT:    movaps {{.*#+}} xmm7 = [nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm7
 ; SSE2-NEXT:    andps %xmm7, %xmm0
 ; SSE2-NEXT:    orps %xmm6, %xmm0
 ; SSE2-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
@@ -262,7 +262,7 @@ define <4 x double> @v4f64(<4 x double>
 ; AVX-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandpd %xmm3, %xmm2, %xmm4
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX-NEXT:    vmovapd {{.*#+}} xmm6 = [nan,nan]
+; AVX-NEXT:    vmovapd {{.*#+}} xmm6
 ; AVX-NEXT:    vandpd %xmm6, %xmm5, %xmm7
 ; AVX-NEXT:    vorpd %xmm4, %xmm7, %xmm4
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]



