[llvm] r333953 - Revert "Remove various uses of undef in the X86 test suite as patterns involving undef can collapse them. NFC"

Amaury Sechet via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jun 4 14:20:45 PDT 2018


Author: deadalnix
Date: Mon Jun  4 14:20:45 2018
New Revision: 333953

URL: http://llvm.org/viewvc/llvm-project?rev=333953&view=rev
Log:
Revert "Remove various use of undef in the X86 test suite as patern involving undef can collapse them. NFC"

This reverts commit f0e85c194ae5e87476bc767304470dec85b6774f.
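
For context, the change being reverted had replaced undef operands in these tests
with explicit function arguments, because instructions whose operands are undef
can be folded away and the pattern a test means to exercise can quietly
disappear. A minimal, hypothetical illustration (not part of this commit;
@collapse_example is made up): mask lanes that select from an undef operand
produce undef result lanes, so the optimizer is free to rewrite or drop the
shuffle.

  define <4 x float> @collapse_example(<4 x float> %v) {
    ; Mask indices 4 and 5 select from the second (undef) operand, so those
    ; result lanes are undef; the optimizer may canonicalize this to a shuffle
    ; of %v alone with undef mask lanes, changing what the test checks.
    %s = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
    ret <4 x float> %s
  }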

Modified:
    llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll
    llvm/trunk/test/CodeGen/X86/pr34177.ll

Modified: llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll?rev=333953&r1=333952&r2=333953&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll Mon Jun  4 14:20:45 2018
@@ -4,39 +4,38 @@
 ; rdar://11314175: SD Scheduler, BuildSchedUnits assert:
 ;                  N->getNodeId() == -1 && "Node already inserted!
 
-define void @func(<4 x float> %a, <16 x i8> %b, <16 x i8> %c, <8 x float> %d, <8 x float> %e, <8 x float>* %f) nounwind ssp {
+define void @func() nounwind ssp {
 ; CHECK-LABEL: func:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqu 0, %xmm3
-; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm3, %ymm0
-; CHECK-NEXT:    vpalignr {{.*#+}} xmm1 = xmm3[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
-; CHECK-NEXT:    vmovdqu 32, %xmm3
-; CHECK-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[4,5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3]
-; CHECK-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; CHECK-NEXT:    vmovups 0, %xmm0
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,2,3,3]
+; CHECK-NEXT:    vbroadcastss 32, %xmm3
+; CHECK-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; CHECK-NEXT:    vmulps %ymm2, %ymm2, %ymm2
 ; CHECK-NEXT:    vmulps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    vmulps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vaddps %ymm0, %ymm2, %ymm0
 ; CHECK-NEXT:    vaddps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    vmulps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vaddps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    vhaddps %ymm4, %ymm0, %ymm0
+; CHECK-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    vsubps %ymm0, %ymm0, %ymm0
 ; CHECK-NEXT:    vhaddps %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    vmovaps %ymm0, (%rdi)
+; CHECK-NEXT:    vmovaps %ymm0, (%rax)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %tmp = load <4 x float>, <4 x float>* null, align 1
   %tmp14 = getelementptr <4 x float>, <4 x float>* null, i32 2
   %tmp15 = load <4 x float>, <4 x float>* %tmp14, align 1
   %tmp16 = shufflevector <4 x float> %tmp, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
-  %tmp17 = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %tmp16, <4 x float> %a, i8 1)
+  %tmp17 = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %tmp16, <4 x float> undef, i8 1)
   %tmp18 = bitcast <4 x float> %tmp to <16 x i8>
-  %tmp19 = shufflevector <16 x i8> %tmp18, <16 x i8> %b, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+  %tmp19 = shufflevector <16 x i8> %tmp18, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
   %tmp20 = bitcast <16 x i8> %tmp19 to <4 x float>
   %tmp21 = bitcast <4 x float> %tmp15 to <16 x i8>
-  %tmp22 = shufflevector <16 x i8> %c, <16 x i8> %tmp21, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
+  %tmp22 = shufflevector <16 x i8> undef, <16 x i8> %tmp21, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
   %tmp23 = bitcast <16 x i8> %tmp22 to <4 x float>
   %tmp24 = shufflevector <4 x float> %tmp20, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
   %tmp25 = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %tmp24, <4 x float> %tmp23, i8 1)
@@ -44,14 +43,14 @@ define void @func(<4 x float> %a, <16 x
   %tmp27 = fmul <8 x float> %tmp25, %tmp25
   %tmp28 = fadd <8 x float> %tmp26, %tmp27
   %tmp29 = fadd <8 x float> %tmp28, %tmp28
-  %tmp30 = shufflevector <8 x float> %tmp29, <8 x float> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %tmp30 = shufflevector <8 x float> %tmp29, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %tmp31 = fmul <4 x float> %tmp30, %tmp30
   %tmp32 = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> zeroinitializer, <4 x float> %tmp31, i8 1)
   %tmp33 = fadd <8 x float> %tmp32, %tmp32
-  %tmp34 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %tmp33, <8 x float> %e) nounwind
+  %tmp34 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %tmp33, <8 x float> undef) nounwind
   %tmp35 = fsub <8 x float> %tmp34, %tmp34
   %tmp36 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> zeroinitializer, <8 x float> %tmp35) nounwind
-  store <8 x float> %tmp36, <8 x float>* %f, align 32
+  store <8 x float> %tmp36, <8 x float>* undef, align 32
   ret void
 }
 

Modified: llvm/trunk/test/CodeGen/X86/pr34177.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr34177.ll?rev=333953&r1=333952&r2=333953&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr34177.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pr34177.ll Mon Jun  4 14:20:45 2018
@@ -5,55 +5,48 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @test(<4 x i64> %a, <4 x x86_fp80> %b, <8 x x86_fp80>* %c) local_unnamed_addr {
+define void @test() local_unnamed_addr {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpextrq $1, %xmm0, %r8
-; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [2,3]
-; CHECK-NEXT:    vmovq %xmm1, %r9
-; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; CHECK-NEXT:    vmovq %xmm2, %rdx
-; CHECK-NEXT:    vpextrq $1, %xmm1, %rsi
-; CHECK-NEXT:    vpextrq $1, %xmm2, %rax
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm0 = [2,3]
+; CHECK-NEXT:    vpextrq $1, %xmm0, %rax
 ; CHECK-NEXT:    vmovq %xmm0, %rcx
-; CHECK-NEXT:    negq %rcx
+; CHECK-NEXT:    negq %rdx
 ; CHECK-NEXT:    fld1
 ; CHECK-NEXT:    fldz
 ; CHECK-NEXT:    fld %st(0)
 ; CHECK-NEXT:    fcmove %st(2), %st(0)
-; CHECK-NEXT:    cmpq %rax, %rsi
+; CHECK-NEXT:    cmpq %rax, %rcx
 ; CHECK-NEXT:    fld %st(1)
 ; CHECK-NEXT:    fcmove %st(3), %st(0)
-; CHECK-NEXT:    cmpq %rdx, %r9
+; CHECK-NEXT:    cmpq %rax, %rax
 ; CHECK-NEXT:    fld %st(2)
 ; CHECK-NEXT:    fcmove %st(4), %st(0)
 ; CHECK-NEXT:    movl $1, %eax
-; CHECK-NEXT:    cmpq %r8, %rax
+; CHECK-NEXT:    cmpq %rax, %rax
 ; CHECK-NEXT:    fxch %st(3)
 ; CHECK-NEXT:    fcmove %st(4), %st(0)
 ; CHECK-NEXT:    fstp %st(4)
-; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    fstpt 70(%rdi)
-; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    fstpt 50(%rdi)
-; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    fstpt 30(%rdi)
-; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    fstpt 10(%rdi)
-; CHECK-NEXT:    fadd %st(0), %st(0)
-; CHECK-NEXT:    fstpt 60(%rdi)
 ; CHECK-NEXT:    fxch %st(1)
 ; CHECK-NEXT:    fadd %st(0), %st(0)
-; CHECK-NEXT:    fstpt 40(%rdi)
-; CHECK-NEXT:    fxch %st(1)
+; CHECK-NEXT:    fxch %st(3)
+; CHECK-NEXT:    fadd %st(0), %st(0)
+; CHECK-NEXT:    fxch %st(2)
 ; CHECK-NEXT:    fadd %st(0), %st(0)
-; CHECK-NEXT:    fstpt 20(%rdi)
+; CHECK-NEXT:    fxch %st(1)
 ; CHECK-NEXT:    fadd %st(0), %st(0)
-; CHECK-NEXT:    fstpt (%rdi)
-  %1 = icmp eq <4 x i64> <i64 0, i64 1, i64 2, i64 3>, %a
+; CHECK-NEXT:    fxch %st(3)
+; CHECK-NEXT:    fstpt (%rax)
+; CHECK-NEXT:    fxch %st(2)
+; CHECK-NEXT:    fstpt (%rax)
+; CHECK-NEXT:    fxch %st(1)
+; CHECK-NEXT:    fstpt (%rax)
+; CHECK-NEXT:    fstpt (%rax)
+  %1 = icmp eq <4 x i64> <i64 0, i64 1, i64 2, i64 3>, undef
   %2 = select <4 x i1> %1, <4 x x86_fp80> <x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000>, <4 x x86_fp80> zeroinitializer
   %3 = fadd <4 x x86_fp80> %2, %2
-  %4 = shufflevector <4 x x86_fp80> %3, <4 x x86_fp80> %b, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x x86_fp80> %4, <8 x x86_fp80>* %c, align 16
+  %4 = shufflevector <4 x x86_fp80> %3, <4 x x86_fp80> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  store <8 x x86_fp80> %4, <8 x x86_fp80>* undef, align 16
   unreachable
 }
+



