[llvm] r271224 - [X86][AVX2] Regenerated AVX2 extension tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon May 30 11:49:59 PDT 2016


Author: rksimon
Date: Mon May 30 13:49:57 2016
New Revision: 271224

URL: http://llvm.org/viewvc/llvm-project?rev=271224&view=rev
Log:
[X86][AVX2] Regenerated AVX2 extension tests
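
The new assertions come from the utils/update_llc_test_checks.py script noted in the test header below. A minimal sketch of the regeneration step, assuming a built llc is reachable on PATH (the script's options, such as --llc-binary for pointing at a specific llc, have varied across LLVM revisions, so treat this as a sketch rather than an exact invocation):

    cd llvm/trunk
    python utils/update_llc_test_checks.py test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll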

Modified:
    llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll

Modified: llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll?rev=271224&r1=271223&r2=271224&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll Mon May 30 13:49:57 2016
@@ -1,16 +1,34 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X64
 
 define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbw
-; CHECK: vpmovsxbw (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxbw:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxbw (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxbw:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxbw (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = sext <16 x i8> %1 to <16 x i16>
   ret <16 x i16> %2
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
-; CHECK: vpmovsxbd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxbd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxbd (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxbd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxbd (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %3 = sext <8 x i8> %2 to <8 x i32>
@@ -18,8 +36,16 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
-; CHECK: vpmovsxbq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxbq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxbq (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxbq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxbq (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = sext <4 x i8> %2 to <4 x i64>
@@ -27,16 +53,32 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
-; CHECK: vpmovsxwd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxwd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxwd (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxwd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxwd (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = sext <8 x i16> %1 to <8 x i32>
   ret <8 x i32> %2
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
-; CHECK: vpmovsxwq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxwq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxwq (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxwq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxwq (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = sext <4 x i16> %2 to <4 x i64>
@@ -44,24 +86,48 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
-; CHECK: vpmovsxdq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovsxdq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovsxdq (%eax), %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovsxdq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovsxdq (%rdi), %ymm0
+; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a, align 1
   %2 = sext <4 x i32> %1 to <4 x i64>
   ret <4 x i64> %2
 }
 
 define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbw
-; CHECK: vpmovzxbw (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxbw:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxbw:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = zext <16 x i8> %1 to <16 x i16>
   ret <16 x i16> %2
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
-; CHECK: vpmovzxbd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxbd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxbd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %3 = zext <8 x i8> %2 to <8 x i32>
@@ -69,8 +135,16 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
-; CHECK: vpmovzxbq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxbq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxbq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; X64-NEXT:    retq
   %1 = load <16 x i8>, <16 x i8>* %a, align 1
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = zext <4 x i8> %2 to <4 x i64>
@@ -78,16 +152,32 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
-; CHECK: vpmovzxwd (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxwd:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxwd:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = zext <8 x i16> %1 to <8 x i32>
   ret <8 x i32> %2
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
-; CHECK: vpmovzxwq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxwq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxwq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X64-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a, align 1
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = zext <4 x i16> %2 to <4 x i64>
@@ -95,8 +185,16 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
-; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
-; CHECK: vpmovzxdq (%rdi), %ymm0
+; X32-LABEL: test_llvm_x86_avx2_pmovzxdq:
+; X32:       ## BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_llvm_x86_avx2_pmovzxdq:
+; X64:       ## BB#0:
+; X64-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-NEXT:    retq
   %1 = load <4 x i32>, <4 x i32>* %a, align 1
   %2 = zext <4 x i32> %1 to <4 x i64>
   ret <4 x i64> %2
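
The updated RUN lines can also be exercised by hand outside of lit; for example, mirroring the X64 RUN line above (assuming llc and FileCheck from a local build are on PATH and the command is run from llvm/trunk):

    llc < test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll -mtriple=x86_64-apple-darwin -mattr=+avx2 \
      | FileCheck test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll --check-prefix=X64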



