[llvm] r298918 - [x86] add AVX2 run to show 256-bit opportunity; NFC

Sanjay Patel via llvm-commits <llvm-commits at lists.llvm.org>
Tue Mar 28 06:46:50 PDT 2017


Author: spatel
Date: Tue Mar 28 08:46:50 2017
New Revision: 298918

URL: http://llvm.org/viewvc/llvm-project?rev=298918&view=rev
Log:
[x86] add AVX2 run to show 256-bit opportunity; NFC

Modified:
    llvm/trunk/test/CodeGen/X86/memcmp.ll

Modified: llvm/trunk/test/CodeGen/X86/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcmp.ll?rev=298918&r1=298917&r2=298918&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memcmp.ll Tue Mar 28 08:46:50 2017
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=AVX2
 
 ; This tests codegen time inlining/optimization of memcmp
 ; rdar://6480398
@@ -177,15 +178,14 @@ define i1 @length16(i8* %x, i8* %y) noun
 ; X32-NEXT:    setne %al
 ; X32-NEXT:    retl
 ;
-; X64-LABEL: length16:
-; X64:       # BB#0:
-; X64-NEXT:    movdqu (%rsi), %xmm0
-; X64-NEXT:    movdqu (%rdi), %xmm1
-; X64-NEXT:    pcmpeqb %xmm0, %xmm1
-; X64-NEXT:    pmovmskb %xmm1, %eax
-; X64-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-NEXT:    setne %al
-; X64-NEXT:    retq
+; AVX2-LABEL: length16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovdqu (%rdi), %xmm0
+; AVX2-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vpmovmskb %xmm0, %eax
+; AVX2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; AVX2-NEXT:    setne %al
+; AVX2-NEXT:    retq
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
   %cmp = icmp ne i32 %call, 0
   ret i1 %cmp
@@ -204,14 +204,14 @@ define i1 @length16_const(i8* %X, i32* n
 ; X32-NEXT:    sete %al
 ; X32-NEXT:    retl
 ;
-; X64-LABEL: length16_const:
-; X64:       # BB#0:
-; X64-NEXT:    movdqu (%rdi), %xmm0
-; X64-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
-; X64-NEXT:    pmovmskb %xmm0, %eax
-; X64-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-NEXT:    sete %al
-; X64-NEXT:    retq
+; AVX2-LABEL: length16_const:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovdqu (%rdi), %xmm0
+; AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpmovmskb %xmm0, %eax
+; AVX2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; AVX2-NEXT:    sete %al
+; AVX2-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
   %c = icmp eq i32 %m, 0
   ret i1 %c
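
For context on the "256-bit opportunity" mentioned in the log: with the new
-mattr=AVX2 run in place, a 32-byte memcmp could in principle be expanded to a
single 256-bit ymm compare (vmovdqu + vpcmpeqb + vpmovmskb against an all-ones
32-bit mask) rather than scalar or two 128-bit compares. The sketch below is
not part of this commit; the function name length32 and the expected lowering
are illustrative assumptions only, written in the same style as the existing
tests in memcmp.ll.

; Hypothetical follow-on test case (illustrative only, not generated output):
; under -mattr=AVX2 this 32-byte compare could lower to roughly
;   vmovdqu (%rdi), %ymm0
;   vpcmpeqb (%rsi), %ymm0, %ymm0
;   vpmovmskb %ymm0, %eax
;   cmpl $-1, %eax            ; 32 byte lanes -> 32-bit mask of all ones
;   setne %al
declare i32 @memcmp(i8*, i8*, i64)

define i1 @length32(i8* %x, i8* %y) nounwind {
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
  %cmp = icmp ne i32 %call, 0
  ret i1 %cmp
}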
