[llvm] abe6dbe - [X86] combine-and.ll - add AVX1 test coverage
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 7 06:24:42 PST 2022
Author: Simon Pilgrim
Date: 2022-12-07T14:21:24Z
New Revision: abe6dbeeddc66b7cc515e57327e7a453cc23273f
URL: https://github.com/llvm/llvm-project/commit/abe6dbeeddc66b7cc515e57327e7a453cc23273f
DIFF: https://github.com/llvm/llvm-project/commit/abe6dbeeddc66b7cc515e57327e7a453cc23273f.diff
LOG: [X86] combine-and.ll - add AVX1 test coverage
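The new RUN line exercises plain-AVX codegen. With just AVX the
<4 x i32> splat constant in and_or_v4i32 is materialized with a
full-width vmovaps load rather than the vbroadcastss used by
AVX2/AVX512, so the previously shared AVX check prefix splits into
AVX1/AVX2/AVX512 variants, and the neg_scalar_broadcast tests gain
AVX1 blocks that splat the inverted scalar with shuffle sequences
(vpshufd/vpshuflw/vpshufb).

For reference, here is the pattern the and_or tests fold, mirroring the
and_or_v4i32 body visible in the diff below (comments are editorial):
the or forces the low four bits to 1, so the following and with 3 keeps
only known-set bits and the expression constant-folds to a splat of 3.

define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
  %1 = or <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15> ; low 4 bits -> 1
  %2 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>     ; bits 0-1 already known 1
  ret <4 x i32> %2                                        ; folds to <3,3,3,3>
}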
Added:
Modified:
llvm/test/CodeGen/X86/combine-and.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-and.ll b/llvm/test/CodeGen/X86/combine-and.ll
index 87fcfcc4a82c..c2731b93439d 100644
--- a/llvm/test/CodeGen/X86/combine-and.ll
+++ b/llvm/test/CodeGen/X86/combine-and.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
@@ -324,6 +325,11 @@ define <2 x i64> @and_or_v2i64(<2 x i64> %a0) {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,8]
; SSE-NEXT: retq
;
+; AVX1-LABEL: and_or_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [8,8]
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: and_or_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [8,8]
@@ -345,10 +351,20 @@ define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
; SSE-NEXT: retq
;
-; AVX-LABEL: and_or_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm0 = [3,3,3,3]
-; AVX-NEXT: retq
+; AVX1-LABEL: and_or_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [3,3,3,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: and_or_v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm0 = [3,3,3,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: and_or_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm0 = [3,3,3,3]
+; AVX512-NEXT: retq
%1 = or <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15>
%2 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
ret <4 x i32> %2
@@ -479,6 +495,14 @@ define <2 x i64> @neg_scalar_broadcast_v2i64(i64 %a0, <2 x i64> %a1) {
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
+; AVX1-LABEL: neg_scalar_broadcast_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: notq %rdi
+; AVX1-NEXT: vmovq %rdi, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: neg_scalar_broadcast_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: notq %rdi
@@ -509,6 +533,14 @@ define <4 x i32> @neg_scalar_broadcast_v4i32(i32 %a0, <4 x i32> %a1) {
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
+; AVX1-LABEL: neg_scalar_broadcast_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: notl %edi
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: neg_scalar_broadcast_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: notl %edi
@@ -540,6 +572,15 @@ define <8 x i16> @neg_scalar_broadcast_v8i16(i16 %a0, <8 x i16> %a1) {
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
+; AVX1-LABEL: neg_scalar_broadcast_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: notl %edi
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: neg_scalar_broadcast_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: notl %edi
@@ -572,6 +613,15 @@ define <16 x i8> @neg_scalar_broadcast_v16i8(i8 %a0, <16 x i8> %a1) {
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
+; AVX1-LABEL: neg_scalar_broadcast_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: notb %dil
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: neg_scalar_broadcast_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: notb %dil
@@ -604,6 +654,15 @@ define <2 x i64> @neg_scalar_broadcast_v16i8_v2i64(i8 %a0, <2 x i64> %a1) {
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
+; AVX1-LABEL: neg_scalar_broadcast_v16i8_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: notb %dil
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: neg_scalar_broadcast_v16i8_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: notb %dil
@@ -635,6 +694,14 @@ define <2 x i64> @neg_scalar_broadcast_v4i32_v2i64(i32 %a0, <2 x i64> %a1) {
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
+; AVX1-LABEL: neg_scalar_broadcast_v4i32_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: notl %edi
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: neg_scalar_broadcast_v4i32_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: notl %edi
@@ -667,6 +734,15 @@ define <4 x i32> @neg_scalar_broadcast_two_uses(i32 %a0, <4 x i32> %a1, ptr %a2)
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
+; AVX1-LABEL: neg_scalar_broadcast_two_uses:
+; AVX1: # %bb.0:
+; AVX1-NEXT: notl %edi
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vmovdqa %xmm1, (%rsi)
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: neg_scalar_broadcast_two_uses:
; AVX2: # %bb.0:
; AVX2-NEXT: notl %edi
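The CHECK lines above are autogenerated; after adding a RUN line like
this they can be refreshed with the script noted at the top of the
test. A minimal sketch, assuming a local build of llc (paths are
illustrative):

  python3 llvm/utils/update_llc_test_checks.py \
      --llc-binary=build/bin/llc \
      llvm/test/CodeGen/X86/combine-and.ll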