[llvm] 6f11c39 - [X86] combine-and.ll - add AVX2/AVX512 test coverage

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Thu Nov 24 02:38:29 PST 2022


Author: Simon Pilgrim
Date: 2022-11-24T10:38:11Z
New Revision: 6f11c395f5899a10cbcc823cfd6ef7b08525a17e

URL: https://github.com/llvm/llvm-project/commit/6f11c395f5899a10cbcc823cfd6ef7b08525a17e
DIFF: https://github.com/llvm/llvm-project/commit/6f11c395f5899a10cbcc823cfd6ef7b08525a17e.diff

LOG: [X86] combine-and.ll - add AVX2/AVX512 test coverage
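
The CHECK bodies in this test are autogenerated, so after adding the new AVX2/AVX512 RUN lines the assertions are refreshed by rerunning utils/update_llc_test_checks.py over the file. A typical invocation looks like the following (the build directory and llc path are illustrative assumptions, not taken from this commit):

    # Regenerate the CHECK/SSE/AVX/AVX2/AVX512 lines from the RUN lines in the test.
    # Assumes an LLVM build tree at ./build with llc already built (paths are assumptions).
    ./llvm/utils/update_llc_test_checks.py \
        --llc-binary ./build/bin/llc \
        llvm/test/CodeGen/X86/combine-and.ll

The script runs llc once per RUN line and emits assertions under the most common prefix that still matches: identical codegen stays under CHECK or AVX, while functions such as and_or_v2i64 below get separate AVX2 and AVX512 blocks where the output differs.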

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-and.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-and.ll b/llvm/test/CodeGen/X86/combine-and.ll
index 9f26983795754..0641b9560578c 100644
--- a/llvm/test/CodeGen/X86/combine-and.ll
+++ b/llvm/test/CodeGen/X86/combine-and.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx2   < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 define i32 @and_self(i32 %x) {
 ; CHECK-LABEL: and_self:
@@ -25,140 +27,223 @@ define <4 x i32> @and_self_vec(<4 x i32> %x) {
 ;
 
 define <4 x i32> @test1(<4 x i32> %A) {
-; CHECK-LABEL: test1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 0>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test2(<4 x i32> %A) {
-; CHECK-LABEL: test2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 0>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test3(<4 x i32> %A) {
-; CHECK-LABEL: test3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test3:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test3:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 0>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test4(<4 x i32> %A) {
-; CHECK-LABEL: test4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test4:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test4:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 0, i32 0, i32 -1>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test5(<4 x i32> %A) {
-; CHECK-LABEL: test5:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test5:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test5:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test6(<4 x i32> %A) {
-; CHECK-LABEL: test6:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test6:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test6:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test7(<4 x i32> %A) {
-; CHECK-LABEL: test7:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test7:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test7:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 -1>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test8(<4 x i32> %A) {
-; CHECK-LABEL: test8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 -1>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test9(<4 x i32> %A) {
-; CHECK-LABEL: test9:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
-; CHECK-NEXT:    retq
+; SSE-LABEL: test9:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test9:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test10(<4 x i32> %A) {
-; CHECK-LABEL: test10:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test10:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test10:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 0>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test11(<4 x i32> %A) {
-; CHECK-LABEL: test11:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test11:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test11:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 -1>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test12(<4 x i32> %A) {
-; CHECK-LABEL: test12:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test12:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test12:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 0>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test13(<4 x i32> %A) {
-; CHECK-LABEL: test13:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test13:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test13:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 -1>
   ret <4 x i32> %1
 }
 
 define <4 x i32> @test14(<4 x i32> %A) {
-; CHECK-LABEL: test14:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test14:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test14:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
   ret <4 x i32> %1
 }
@@ -166,20 +251,31 @@ define <4 x i32> @test14(<4 x i32> %A) {
 ; X & undef must fold to 0. So lane 0 must choose from the zero vector.
 
 define <4 x i32> @undef_lane(<4 x i32> %x) {
-; CHECK-LABEL: undef_lane:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm1, %xmm1
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: undef_lane:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: undef_lane:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX-NEXT:    retq
   %r = and <4 x i32> %x, <i32 undef, i32 4294967295, i32 0, i32 4294967295>
   ret <4 x i32> %r
 }
 
 define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test15:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test15:
+; SSE:       # %bb.0:
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test15:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
   %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 0>
   %3 = or <4 x i32> %1, %2
@@ -187,10 +283,15 @@ define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
 }
 
 define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
   %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 -1>
   %3 = or <4 x i32> %1, %2
@@ -198,10 +299,15 @@ define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
 }
 
 define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
-; CHECK-LABEL: test17:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test17:
+; SSE:       # %bb.0:
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test17:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX-NEXT:    retq
   %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
   %2 = and <4 x i32> %B, <i32 -1, i32 0, i32 -1, i32 0>
   %3 = or <4 x i32> %1, %2
@@ -213,30 +319,51 @@ define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
 ;
 
 define <2 x i64> @and_or_v2i64(<2 x i64> %a0) {
-; CHECK-LABEL: and_or_v2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [8,8]
-; CHECK-NEXT:    retq
+; SSE-LABEL: and_or_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,8]
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: and_or_v2i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [8,8]
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: and_or_v2i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = [8,8]
+; AVX512-NEXT:    # xmm0 = mem[0,0]
+; AVX512-NEXT:    retq
   %1 = or <2 x i64> %a0, <i64 255, i64 255>
   %2 = and <2 x i64> %1, <i64 8, i64 8>
   ret <2 x i64> %2
 }
 
 define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
-; CHECK-LABEL: and_or_v4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
-; CHECK-NEXT:    retq
+; SSE-LABEL: and_or_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: and_or_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm0 = [3,3,3,3]
+; AVX-NEXT:    retq
   %1 = or <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15>
   %2 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
   ret <4 x i32> %2
 }
 
 define <8 x i16> @and_or_v8i16(<8 x i16> %a0) {
-; CHECK-LABEL: and_or_v8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [15,7,3,1,14,10,2,32767]
-; CHECK-NEXT:    retq
+; SSE-LABEL: and_or_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [15,7,3,1,14,10,2,32767]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: and_or_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [15,7,3,1,14,10,2,32767]
+; AVX-NEXT:    retq
   %1 = or <8 x i16> %a0, <i16 255, i16 127, i16 63, i16 31, i16 15, i16 31, i16 63, i16 -1>
   %2 = and <8 x i16> %1, <i16 15, i16 7, i16 3, i16 1, i16 14, i16 10, i16 2, i16 32767>
   ret <8 x i16> %2
@@ -247,10 +374,15 @@ define <8 x i16> @and_or_v8i16(<8 x i16> %a0) {
 ;
 
 define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
-; CHECK-LABEL: and_or_zext_v2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: and_or_zext_v2i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: and_or_zext_v2i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %1 = zext <2 x i32> %a0 to <2 x i64>
   %2 = or <2 x i64> %1, <i64 1, i64 1>
   %3 = and <2 x i64> %2, <i64 4294967296, i64 4294967296>
@@ -258,10 +390,15 @@ define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
 }
 
 define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
-; CHECK-LABEL: and_or_zext_v4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorps %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: and_or_zext_v4i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: and_or_zext_v4i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %1 = zext <4 x i16> %a0 to <4 x i32>
   %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = and <4 x i32> %2, <i32 65536, i32 65536, i32 65536, i32 65536>
@@ -273,21 +410,32 @@ define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
 ;
 
 define <8 x i16> @ashr_mask1_v8i16(<8 x i16> %a0) {
-; CHECK-LABEL: ashr_mask1_v8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    psrlw $15, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: ashr_mask1_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $15, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: ashr_mask1_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $15, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ret <8 x i16> %2
 }
 
 define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
-; CHECK-LABEL: ashr_mask7_v4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    psrad $31, %xmm0
-; CHECK-NEXT:    psrld $29, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: ashr_mask7_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    psrld $29, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: ashr_mask7_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpsrld $29, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %1 = ashr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
   %2 = and <4 x i32> %1, <i32 7, i32 7, i32 7, i32 7>
   ret <4 x i32> %2
@@ -299,12 +447,19 @@ define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
 
 ; PR34620 - redundant PAND after vector shift of a byte vector (PSRLW)
 define <16 x i8> @PR34620(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: PR34620:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    psrlw $1, %xmm0
-; CHECK-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT:    paddb %xmm1, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: PR34620:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $1, %xmm0
+; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    paddb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: PR34620:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %1 = lshr <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = add <16 x i8> %2, %a1

