[llvm] r369793 - [X86] Automatically generate load-local-v3i1.ll. NFC

Amaury Sechet via llvm-commits <llvm-commits at lists.llvm.org>
Fri Aug 23 11:12:33 PDT 2019


Author: deadalnix
Date: Fri Aug 23 11:12:33 2019
New Revision: 369793

URL: http://llvm.org/viewvc/llvm-project?rev=369793&view=rev
Log:
[X86] Automatically generate load-local-v3i1.ll. NFC
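
The CHECK lines in this diff were generated by utils/update_llc_test_checks.py
rather than written by hand. A minimal sketch of the regeneration command,
assuming an LLVM build tree whose llc lives at <build>/bin/llc (the build path
is illustrative):

    # Regenerate the FileCheck assertions in place.
    python utils/update_llc_test_checks.py \
        --llc-binary=<build>/bin/llc \
        test/CodeGen/X86/load-local-v3i1.ll

The script runs llc over each RUN line in the test and rewrites the CHECK
blocks to match the current output, which is why this change is NFC: only the
test assertions change, not compiler behavior.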

Modified:
    llvm/trunk/test/CodeGen/X86/load-local-v3i1.ll

Modified: llvm/trunk/test/CodeGen/X86/load-local-v3i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/load-local-v3i1.ll?rev=369793&r1=369792&r2=369793&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/load-local-v3i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/load-local-v3i1.ll Fri Aug 23 11:12:33 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s
 
 ; widen a v3i1 to v4i1 to do a vector load/store. We would previously
@@ -12,6 +13,37 @@ declare <3 x i32> @llvm.masked.load.v3i3
 declare void @llvm.masked.store.v3i32.p1v3i32(<3 x i32>, <3 x i32> addrspace(1)*, i32, <3 x i1>)
 
 define  <3 x i32> @masked_load_v3(i32 addrspace(1)*, <3 x i1>) {
+; CHECK-LABEL: masked_load_v3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andb $1, %sil
+; CHECK-NEXT:    andb $1, %dl
+; CHECK-NEXT:    addb %dl, %dl
+; CHECK-NEXT:    orb %sil, %dl
+; CHECK-NEXT:    andb $1, %cl
+; CHECK-NEXT:    shlb $2, %cl
+; CHECK-NEXT:    orb %dl, %cl
+; CHECK-NEXT:    testb $1, %cl
+; CHECK-NEXT:    # implicit-def: $xmm0
+; CHECK-NEXT:    jne .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %else
+; CHECK-NEXT:    testb $2, %cl
+; CHECK-NEXT:    jne .LBB0_3
+; CHECK-NEXT:  .LBB0_4: # %else2
+; CHECK-NEXT:    testb $4, %cl
+; CHECK-NEXT:    jne .LBB0_5
+; CHECK-NEXT:  .LBB0_6: # %else5
+; CHECK-NEXT:    retq
+; CHECK-NEXT:  .LBB0_1: # %cond.load
+; CHECK-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    testb $2, %cl
+; CHECK-NEXT:    je .LBB0_4
+; CHECK-NEXT:  .LBB0_3: # %cond.load1
+; CHECK-NEXT:    pinsrd $1, 4(%rdi), %xmm0
+; CHECK-NEXT:    testb $4, %cl
+; CHECK-NEXT:    je .LBB0_6
+; CHECK-NEXT:  .LBB0_5: # %cond.load4
+; CHECK-NEXT:    pinsrd $2, 8(%rdi), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %2 = bitcast i32 addrspace(1)* %0 to <3 x i32> addrspace(1)*
   %3 = call <3 x i32> @llvm.masked.load.v3i32.p1v3i32(<3 x i32> addrspace(1)* %2, i32 4, <3 x i1> %1, <3 x i32> undef)
@@ -19,6 +51,36 @@ entry:
 }
 
 define void @masked_store4_v3(<3 x i32>, i32 addrspace(1)*, <3 x i1>) {
+; CHECK-LABEL: masked_store4_v3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andb $1, %sil
+; CHECK-NEXT:    andb $1, %dl
+; CHECK-NEXT:    addb %dl, %dl
+; CHECK-NEXT:    orb %sil, %dl
+; CHECK-NEXT:    andb $1, %cl
+; CHECK-NEXT:    shlb $2, %cl
+; CHECK-NEXT:    orb %dl, %cl
+; CHECK-NEXT:    testb $1, %cl
+; CHECK-NEXT:    jne .LBB1_1
+; CHECK-NEXT:  # %bb.2: # %else
+; CHECK-NEXT:    testb $2, %cl
+; CHECK-NEXT:    jne .LBB1_3
+; CHECK-NEXT:  .LBB1_4: # %else2
+; CHECK-NEXT:    testb $4, %cl
+; CHECK-NEXT:    jne .LBB1_5
+; CHECK-NEXT:  .LBB1_6: # %else4
+; CHECK-NEXT:    retq
+; CHECK-NEXT:  .LBB1_1: # %cond.store
+; CHECK-NEXT:    movss %xmm0, (%rdi)
+; CHECK-NEXT:    testb $2, %cl
+; CHECK-NEXT:    je .LBB1_4
+; CHECK-NEXT:  .LBB1_3: # %cond.store1
+; CHECK-NEXT:    extractps $1, %xmm0, 4(%rdi)
+; CHECK-NEXT:    testb $4, %cl
+; CHECK-NEXT:    je .LBB1_6
+; CHECK-NEXT:  .LBB1_5: # %cond.store3
+; CHECK-NEXT:    extractps $2, %xmm0, 8(%rdi)
+; CHECK-NEXT:    retq
 entry:
   %3 = bitcast i32 addrspace(1)* %1 to <3 x i32> addrspace(1)*
   call void @llvm.masked.store.v3i32.p1v3i32(<3 x i32> %0, <3 x i32> addrspace(1)* %3, i32 4, <3 x i1> %2)
@@ -28,41 +90,41 @@ entry:
 define void @local_load_v3i1(i32 addrspace(1)* %out, i32 addrspace(1)* %in, <3 x i1>* %predicate_ptr) nounwind {
 ; CHECK-LABEL: local_load_v3i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT: pushq   %rbp
-; CHECK-NEXT: pushq   %r15
-; CHECK-NEXT: pushq   %r14
-; CHECK-NEXT: pushq   %rbx
-; CHECK-NEXT: pushq   %rax
-; CHECK-NEXT: movq    %rdi, %r14
-; CHECK-NEXT: movzbl  (%rdx), %ebp
-; CHECK-NEXT: movl    %ebp, %eax
-; CHECK-NEXT: shrl    %eax
-; CHECK-NEXT: andl    $1, %eax
-; CHECK-NEXT: movl    %ebp, %ecx
-; CHECK-NEXT: andl    $1, %ecx
-; CHECK-NEXT: movd    %ecx, %xmm0
-; CHECK-NEXT: pinsrd  $1, %eax, %xmm0
-; CHECK-NEXT: shrl    $2, %ebp
-; CHECK-NEXT: andl    $1, %ebp
-; CHECK-NEXT: pinsrd  $2, %ebp, %xmm0
-; CHECK-NEXT: movd    %xmm0, %ebx
-; CHECK-NEXT: pextrd  $1, %xmm0, %r15d
-; CHECK-NEXT: movq    %rsi, %rdi
-; CHECK-NEXT: movl    %ebx, %esi
-; CHECK-NEXT: movl    %r15d, %edx
-; CHECK-NEXT: movl    %ebp, %ecx
-; CHECK-NEXT: callq   masked_load_v3
-; CHECK-NEXT: movq    %r14, %rdi
-; CHECK-NEXT: movl    %ebx, %esi
-; CHECK-NEXT: movl    %r15d, %edx
-; CHECK-NEXT: movl    %ebp, %ecx
-; CHECK-NEXT: callq   masked_store4_v3
-; CHECK-NEXT: addq    $8, %rsp
-; CHECK-NEXT: popq    %rbx
-; CHECK-NEXT: popq    %r14
-; CHECK-NEXT: popq    %r15
-; CHECK-NEXT: popq    %rbp
-; CHECK-NEXT: retq
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    movq %rdi, %r14
+; CHECK-NEXT:    movzbl (%rdx), %ebp
+; CHECK-NEXT:    movl %ebp, %eax
+; CHECK-NEXT:    shrl %eax
+; CHECK-NEXT:    andl $1, %eax
+; CHECK-NEXT:    movl %ebp, %ecx
+; CHECK-NEXT:    andl $1, %ecx
+; CHECK-NEXT:    movd %ecx, %xmm0
+; CHECK-NEXT:    pinsrd $1, %eax, %xmm0
+; CHECK-NEXT:    shrl $2, %ebp
+; CHECK-NEXT:    andl $1, %ebp
+; CHECK-NEXT:    pinsrd $2, %ebp, %xmm0
+; CHECK-NEXT:    movd %xmm0, %ebx
+; CHECK-NEXT:    pextrd $1, %xmm0, %r15d
+; CHECK-NEXT:    movq %rsi, %rdi
+; CHECK-NEXT:    movl %ebx, %esi
+; CHECK-NEXT:    movl %r15d, %edx
+; CHECK-NEXT:    movl %ebp, %ecx
+; CHECK-NEXT:    callq masked_load_v3
+; CHECK-NEXT:    movq %r14, %rdi
+; CHECK-NEXT:    movl %ebx, %esi
+; CHECK-NEXT:    movl %r15d, %edx
+; CHECK-NEXT:    movl %ebp, %ecx
+; CHECK-NEXT:    callq masked_store4_v3
+; CHECK-NEXT:    addq $8, %rsp
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    popq %r14
+; CHECK-NEXT:    popq %r15
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
   %predicate = load <3 x i1>, <3 x i1>* %predicate_ptr
   %load1 = call <3 x i32> @masked_load_v3(i32 addrspace(1)* %in, <3 x i1> %predicate)
   call void @masked_store4_v3(<3 x i32> %load1, i32 addrspace(1)* %out, <3 x i1> %predicate)
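
For reference, the assembly being checked above can be reproduced by running
the test's own RUN line by hand (llc and FileCheck from an LLVM build are
assumed to be on PATH):

    llc < test/CodeGen/X86/load-local-v3i1.ll \
        -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 \
      | FileCheck test/CodeGen/X86/load-local-v3i1.ll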