[llvm] 1d44241 - [X86][AVX] Test SSE41 BLENDV combines on AVX targets as well
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 3 04:22:22 PDT 2020
Author: Simon Pilgrim
Date: 2020-09-03T12:06:19+01:00
New Revision: 1d442415a30ef3be0e6ee189bce8795f203e712f
URL: https://github.com/llvm/llvm-project/commit/1d442415a30ef3be0e6ee189bce8795f203e712f
DIFF: https://github.com/llvm/llvm-project/commit/1d442415a30ef3be0e6ee189bce8795f203e712f.diff
LOG: [X86][AVX] Test SSE41 BLENDV combines on AVX targets as well
This shows up any differences due to the SSE41 variants being locked to use xmm0 as their implicit mask register.
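For context: the SSE4.1 BLENDVPD/BLENDVPS/PBLENDVB instructions read their
selection mask implicitly from xmm0, so the register allocator must shuffle
values around to free up xmm0, while the AVX VEX encodings take the mask as
an explicit operand. A minimal sketch of the difference the new AVX RUN line
exposes (the function name here is illustrative, not part of the patch; the
expected asm is taken from the updated checks below):

define <2 x double> @blendvpd_example(<2 x double> %a0, <2 x double> %a1, <2 x double> %mask) {
  ; SSE41: movapd %xmm0, %xmm3                   # free xmm0 for the mask
  ; SSE41: movaps %xmm2, %xmm0                   # mask must live in xmm0
  ; SSE41: blendvpd %xmm0, %xmm1, %xmm3
  ; SSE41: movapd %xmm3, %xmm0
  ; AVX:   vblendvpd %xmm2, %xmm1, %xmm0, %xmm0  # mask is an explicit operand
  %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %mask)
  ret <2 x double> %1
}
declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)

The split SSE/AVX check prefixes added below capture exactly this difference.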
Added:
Modified:
llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll b/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
index cc4dee33c619..88421d8f3f17 100644
--- a/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/combine-sse41-intrinsics.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
define <2 x double> @test_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
@@ -27,28 +28,43 @@ define <8 x i16> @test_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
}
define <2 x double> @test2_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test2_x86_sse41_blend_pd:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movaps %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: test2_x86_sse41_blend_pd:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test2_x86_sse41_blend_pd:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
%1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 -1)
ret <2 x double> %1
}
define <4 x float> @test2_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test2_x86_sse41_blend_ps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movaps %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: test2_x86_sse41_blend_ps:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test2_x86_sse41_blend_ps:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
%1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 -1)
ret <4 x float> %1
}
define <8 x i16> @test2_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test2_x86_sse41_pblend_w:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movaps %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: test2_x86_sse41_pblend_w:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test2_x86_sse41_pblend_w:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
%1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 -1)
ret <8 x i16> %1
}
@@ -78,13 +94,18 @@ define <8 x i16> @test3_x86_sse41_pblend_w(<8 x i16> %a0) {
}
define double @demandedelts_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
-; CHECK-LABEL: demandedelts_blendvpd:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movapd %xmm0, %xmm3
-; CHECK-NEXT: movaps %xmm2, %xmm0
-; CHECK-NEXT: blendvpd %xmm0, %xmm1, %xmm3
-; CHECK-NEXT: movapd %xmm3, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: demandedelts_blendvpd:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm0, %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvpd %xmm0, %xmm1, %xmm3
+; SSE-NEXT: movapd %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedelts_blendvpd:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
%2 = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
%3 = shufflevector <2 x double> %a2, <2 x double> undef, <2 x i32> zeroinitializer
@@ -94,13 +115,18 @@ define double @demandedelts_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x do
}
define float @demandedelts_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
-; CHECK-LABEL: demandedelts_blendvps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movaps %xmm0, %xmm3
-; CHECK-NEXT: movaps %xmm2, %xmm0
-; CHECK-NEXT: blendvps %xmm0, %xmm1, %xmm3
-; CHECK-NEXT: movaps %xmm3, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: demandedelts_blendvps:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedelts_blendvps:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
%2 = shufflevector <4 x float> %a1, <4 x float> undef, <4 x i32> zeroinitializer
%3 = shufflevector <4 x float> %a2, <4 x float> undef, <4 x i32> zeroinitializer
@@ -110,15 +136,22 @@ define float @demandedelts_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float
}
define <16 x i8> @demandedelts_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
-; CHECK-LABEL: demandedelts_pblendvb:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movdqa %xmm0, %xmm3
-; CHECK-NEXT: movdqa %xmm2, %xmm0
-; CHECK-NEXT: pblendvb %xmm0, %xmm1, %xmm3
-; CHECK-NEXT: pxor %xmm0, %xmm0
-; CHECK-NEXT: pshufb %xmm0, %xmm3
-; CHECK-NEXT: movdqa %xmm3, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: demandedelts_pblendvb:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pblendvb %xmm0, %xmm1, %xmm3
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: pshufb %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedelts_pblendvb:
+; AVX: # %bb.0:
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> zeroinitializer
%2 = shufflevector <16 x i8> %a1, <16 x i8> undef, <16 x i32> zeroinitializer
%3 = shufflevector <16 x i8> %a2, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -128,19 +161,32 @@ define <16 x i8> @demandedelts_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>
}
define <2 x i64> @demandedbits_blendvpd(i64 %a0, i64 %a2, <2 x double> %a3) {
-; CHECK-LABEL: demandedbits_blendvpd:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: orq $1, %rax
-; CHECK-NEXT: orq $4, %rdi
-; CHECK-NEXT: movq %rax, %xmm1
-; CHECK-NEXT: movq %rdi, %xmm2
-; CHECK-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
-; CHECK-NEXT: movq {{.*#+}} xmm2 = xmm2[0],zero
-; CHECK-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; CHECK-NEXT: psrlq $11, %xmm1
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: demandedbits_blendvpd:
+; SSE: # %bb.0:
+; SSE-NEXT: movq %rdi, %rax
+; SSE-NEXT: orq $1, %rax
+; SSE-NEXT: orq $4, %rdi
+; SSE-NEXT: movq %rax, %xmm1
+; SSE-NEXT: movq %rdi, %xmm2
+; SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm2 = xmm2[0],zero
+; SSE-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; SSE-NEXT: psrlq $11, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: demandedbits_blendvpd:
+; AVX: # %bb.0:
+; AVX-NEXT: movq %rdi, %rax
+; AVX-NEXT: orq $1, %rax
+; AVX-NEXT: orq $4, %rdi
+; AVX-NEXT: vmovq %rax, %xmm1
+; AVX-NEXT: vmovq %rdi, %xmm2
+; AVX-NEXT: vmovq {{.*#+}} xmm1 = xmm1[0],zero
+; AVX-NEXT: vmovq {{.*#+}} xmm2 = xmm2[0],zero
+; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
+; AVX-NEXT: vpsrlq $11, %xmm0, %xmm0
+; AVX-NEXT: retq
%1 = or i64 %a0, 1
%2 = or i64 %a0, 4
%3 = bitcast i64 %1 to double
@@ -154,26 +200,36 @@ define <2 x i64> @demandedbits_blendvpd(i64 %a0, i64 %a2, <2 x double> %a3) {
}
define <16 x i8> @xor_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
-; CHECK-LABEL: xor_pblendvb:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movdqa %xmm0, %xmm3
-; CHECK-NEXT: movaps %xmm2, %xmm0
-; CHECK-NEXT: pblendvb %xmm0, %xmm3, %xmm1
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: xor_pblendvb:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: pblendvb %xmm0, %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: xor_pblendvb:
+; AVX: # %bb.0:
+; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
%1 = xor <16 x i8> %a2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%2 = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %1)
ret <16 x i8> %2
}
define <4 x float> @xor_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
-; CHECK-LABEL: xor_blendvps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movaps %xmm0, %xmm3
-; CHECK-NEXT: movaps %xmm2, %xmm0
-; CHECK-NEXT: blendvps %xmm0, %xmm3, %xmm1
-; CHECK-NEXT: movaps %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: xor_blendvps:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm3, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: xor_blendvps:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
%1 = bitcast <4 x float> %a2 to <4 x i32>
%2 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
%3 = bitcast <4 x i32> %2 to <4 x float>
@@ -182,13 +238,18 @@ define <4 x float> @xor_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %
}
define <2 x double> @xor_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
-; CHECK-LABEL: xor_blendvpd:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movapd %xmm0, %xmm3
-; CHECK-NEXT: movaps %xmm2, %xmm0
-; CHECK-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: xor_blendvpd:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm0, %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvpd %xmm0, %xmm3, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: xor_blendvpd:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
%1 = bitcast <2 x double> %a2 to <4 x i32>
%2 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
%3 = bitcast <4 x i32> %2 to <2 x double>