[llvm] 5bd374d - [X86] psadbw.ll - add AVX2 target test coverage
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 19 09:04:23 PST 2024
Author: Simon Pilgrim
Date: 2024-02-19T17:04:07Z
New Revision: 5bd374df3e89df701ea8c02e5704911935660d1e
URL: https://github.com/llvm/llvm-project/commit/5bd374df3e89df701ea8c02e5704911935660d1e
DIFF: https://github.com/llvm/llvm-project/commit/5bd374df3e89df701ea8c02e5704911935660d1e.diff
LOG: [X86] psadbw.ll - add AVX2 target test coverage
Added:
Modified:
llvm/test/CodeGen/X86/psadbw.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/psadbw.ll b/llvm/test/CodeGen/X86/psadbw.ll
index c58407d32f91a9..8141b22d321f4d 100644
--- a/llvm/test/CodeGen/X86/psadbw.ll
+++ b/llvm/test/CodeGen/X86/psadbw.ll
@@ -1,13 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,X86-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
; Only bottom 16 bits are set - upper 48 bits are zero.
define <2 x i64> @combine_psadbw_shift(<16 x i8> %0, <16 x i8> %1) nounwind {
-; CHECK-LABEL: combine_psadbw_shift:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorps %xmm0, %xmm0
-; CHECK-NEXT: ret{{[l|q]}}
+; SSE-LABEL: combine_psadbw_shift:
+; SSE: # %bb.0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: combine_psadbw_shift:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: retq
%3 = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %0, <16 x i8> %1)
%4 = lshr <2 x i64> %3, <i64 48, i64 48>
ret <2 x i64> %4
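
(A worked check of the comment above: each PSADBW lane is the sum of eight absolute byte differences, so its maximum value is 8 * 255 = 2040 < 2^16. Bits 16-63 are known zero, making the lshr by 48 always zero - which is why every run line, including the new AVX2 one, folds the whole body to a register-zeroing xorps/vxorps.)
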
@@ -15,18 +21,24 @@ define <2 x i64> @combine_psadbw_shift(<16 x i8> %0, <16 x i8> %1) nounwind {
; Propagate the demanded result elements to the 8 aliasing source elements.
define i64 @combine_psadbw_demandedelt(<16 x i8> %0, <16 x i8> %1) nounwind {
-; X86-LABEL: combine_psadbw_demandedelt:
-; X86: # %bb.0:
-; X86-NEXT: psadbw %xmm1, %xmm0
-; X86-NEXT: movd %xmm0, %eax
-; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: retl
+; X86-SSE-LABEL: combine_psadbw_demandedelt:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: psadbw %xmm1, %xmm0
+; X86-SSE-NEXT: movd %xmm0, %eax
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: retl
+;
+; X64-SSE-LABEL: combine_psadbw_demandedelt:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: psadbw %xmm1, %xmm0
+; X64-SSE-NEXT: movq %xmm0, %rax
+; X64-SSE-NEXT: retq
;
-; X64-LABEL: combine_psadbw_demandedelt:
-; X64: # %bb.0:
-; X64-NEXT: psadbw %xmm1, %xmm0
-; X64-NEXT: movq %xmm0, %rax
-; X64-NEXT: retq
+; AVX2-LABEL: combine_psadbw_demandedelt:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: retq
%3 = shufflevector <16 x i8> %0, <16 x i8> %0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
%4 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
%5 = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %3, <16 x i8> %4)
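
(For reference, a rough sketch of what demanded-elements analysis reduces this test to - illustrative IR, not taken from the commit: each PSADBW output lane reads only its own 8 source bytes, the shuffle masks above permute bytes 8-15 but leave bytes 0-7 in place, and only the low i64 lane of the result is consumed, so the shuffles fold away on every target.)

declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>)

define i64 @sketch_after_combine(<16 x i8> %0, <16 x i8> %1) {
  ; shuffles folded away: bytes 0-7 feed lane 0 unchanged
  %sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %0, <16 x i8> %1)
  %lo = extractelement <2 x i64> %sad, i32 0
  ret i64 %lo
}
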
@@ -36,25 +48,33 @@ define i64 @combine_psadbw_demandedelt(<16 x i8> %0, <16 x i8> %1) nounwind {
; TODO: Each PSADBW source element has a maximum value of 3 - so max sum-of-diffs for each <8 x i8> should be 24.
define <2 x i64> @combine_psadbw_cmp_knownbits(<16 x i8> %a0) nounwind {
-; X86-LABEL: combine_psadbw_cmp_knownbits:
-; X86: # %bb.0:
-; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT: pxor %xmm1, %xmm1
-; X86-NEXT: psadbw %xmm0, %xmm1
-; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; X86-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT: retl
+; X86-SSE-LABEL: combine_psadbw_cmp_knownbits:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pxor %xmm1, %xmm1
+; X86-SSE-NEXT: psadbw %xmm0, %xmm1
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
+; X86-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: retl
+;
+; X64-SSE-LABEL: combine_psadbw_cmp_knownbits:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pxor %xmm1, %xmm1
+; X64-SSE-NEXT: psadbw %xmm0, %xmm1
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
+; X64-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: retq
;
-; X64-LABEL: combine_psadbw_cmp_knownbits:
-; X64: # %bb.0:
-; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT: pxor %xmm1, %xmm1
-; X64-NEXT: psadbw %xmm0, %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; X64-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT: retq
+; AVX2-LABEL: combine_psadbw_cmp_knownbits:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
%mask = and <16 x i8> %a0, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
%sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %mask, <16 x i8> zeroinitializer)
%cmp = icmp sgt <2 x i64> %sad, <i64 32, i64 32>
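
(The arithmetic behind this TODO: the AND mask clamps every byte to at most 3, so each SAD lane is at most 8 * 3 = 24, and icmp sgt against 32 can never be true - known-bits should fold this to an all-zeros vector just like the first test. None of the targets catch it yet: the SSE runs expand the illegal i64 compare via PSHUFD/POR/PCMPGTD, and the new AVX2 run emits a real VPCMPGTQ.)
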
@@ -64,42 +84,53 @@ define <2 x i64> @combine_psadbw_cmp_knownbits(<16 x i8> %a0) nounwind {
; TODO: No need to scalarize the sitofp as the PSADBW results are smaller than i32.
define <2 x double> @combine_psadbw_sitofp_knownbits(<16 x i8> %a0) nounwind {
-; X86-LABEL: combine_psadbw_sitofp_knownbits:
-; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: andl $-8, %esp
-; X86-NEXT: subl $32, %esp
-; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT: pxor %xmm1, %xmm1
-; X86-NEXT: psadbw %xmm0, %xmm1
-; X86-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; X86-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-NEXT: fstpl {{[0-9]+}}(%esp)
-; X86-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-NEXT: fstpl (%esp)
-; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X86-NEXT: movl %ebp, %esp
-; X86-NEXT: popl %ebp
-; X86-NEXT: retl
+; X86-SSE-LABEL: combine_psadbw_sitofp_knownbits:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pushl %ebp
+; X86-SSE-NEXT: movl %esp, %ebp
+; X86-SSE-NEXT: andl $-8, %esp
+; X86-SSE-NEXT: subl $32, %esp
+; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pxor %xmm1, %xmm1
+; X86-SSE-NEXT: psadbw %xmm0, %xmm1
+; X86-SSE-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; X86-SSE-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fstpl (%esp)
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; X86-SSE-NEXT: movl %ebp, %esp
+; X86-SSE-NEXT: popl %ebp
+; X86-SSE-NEXT: retl
;
-; X64-LABEL: combine_psadbw_sitofp_knownbits:
-; X64: # %bb.0:
-; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT: pxor %xmm1, %xmm1
-; X64-NEXT: psadbw %xmm0, %xmm1
-; X64-NEXT: movd %xmm1, %eax
-; X64-NEXT: xorps %xmm0, %xmm0
-; X64-NEXT: cvtsi2sd %eax, %xmm0
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; X64-NEXT: movd %xmm1, %eax
-; X64-NEXT: xorps %xmm1, %xmm1
-; X64-NEXT: cvtsi2sd %eax, %xmm1
-; X64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT: retq
+; X64-SSE-LABEL: combine_psadbw_sitofp_knownbits:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pxor %xmm1, %xmm1
+; X64-SSE-NEXT: psadbw %xmm0, %xmm1
+; X64-SSE-NEXT: movd %xmm1, %eax
+; X64-SSE-NEXT: xorps %xmm0, %xmm0
+; X64-SSE-NEXT: cvtsi2sd %eax, %xmm0
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; X64-SSE-NEXT: movd %xmm1, %eax
+; X64-SSE-NEXT: xorps %xmm1, %xmm1
+; X64-SSE-NEXT: cvtsi2sd %eax, %xmm1
+; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT: retq
+;
+; AVX2-LABEL: combine_psadbw_sitofp_knownbits:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vcvtdq2pd %xmm0, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: vcvtsi2sd %eax, %xmm2, %xmm0
+; AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: retq
%mask = and <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %mask, <16 x i8> zeroinitializer)
%cvt = sitofp <2 x i64> %sad to <2 x double>
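
(If this TODO were implemented, the conversion could be narrowed before lowering - an illustrative sketch assuming known-bits proves each lane fits in i32; the new AVX2 run already gets halfway there, covering lane 0 with vcvtdq2pd but still extracting and scalar-converting lane 1.)

declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>)

define <2 x double> @sketch_narrowed(<16 x i8> %a0) {
  %mask = and <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %mask, <16 x i8> zeroinitializer)
  ; each lane is at most 8 * 1 = 8, so the trunc is lossless and the
  ; sitofp can lower to a single cvtdq2pd instead of scalar converts
  %narrow = trunc <2 x i64> %sad to <2 x i32>
  %cvt = sitofp <2 x i32> %narrow to <2 x double>
  ret <2 x double> %cvt
}
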
@@ -108,27 +139,40 @@ define <2 x double> @combine_psadbw_sitofp_knownbits(<16 x i8> %a0) nounwind {
; TODO: Convert from uitofp to sitofp as the PSADBW results are zero-extended.
define <2 x double> @combine_psadbw_uitofp_knownbits(<16 x i8> %a0) nounwind {
-; X86-LABEL: combine_psadbw_uitofp_knownbits:
-; X86: # %bb.0:
-; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT: pxor %xmm1, %xmm1
-; X86-NEXT: psadbw %xmm1, %xmm0
-; X86-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT: movapd {{.*#+}} xmm1 = [0,1160773632,0,1160773632]
-; X86-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT: addpd %xmm1, %xmm0
-; X86-NEXT: retl
+; X86-SSE-LABEL: combine_psadbw_uitofp_knownbits:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pxor %xmm1, %xmm1
+; X86-SSE-NEXT: psadbw %xmm1, %xmm0
+; X86-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: movapd {{.*#+}} xmm1 = [0,1160773632,0,1160773632]
+; X86-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE-NEXT: addpd %xmm1, %xmm0
+; X86-SSE-NEXT: retl
+;
+; X64-SSE-LABEL: combine_psadbw_uitofp_knownbits:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: pxor %xmm1, %xmm1
+; X64-SSE-NEXT: psadbw %xmm1, %xmm0
+; X64-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: movapd {{.*#+}} xmm1 = [4985484787499139072,4985484787499139072]
+; X64-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE-NEXT: addpd %xmm1, %xmm0
+; X64-SSE-NEXT: retq
;
-; X64-LABEL: combine_psadbw_uitofp_knownbits:
-; X64: # %bb.0:
-; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT: pxor %xmm1, %xmm1
-; X64-NEXT: psadbw %xmm1, %xmm0
-; X64-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT: movapd {{.*#+}} xmm1 = [4985484787499139072,4985484787499139072]
-; X64-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; X64-NEXT: addpd %xmm1, %xmm0
-; X64-NEXT: retq
+; AVX2-LABEL: combine_psadbw_uitofp_knownbits:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = [4985484787499139072,4985484787499139072]
+; AVX2-NEXT: # xmm1 = mem[0,0]
+; AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
%mask = and <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
%sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %mask, <16 x i8> zeroinitializer)
%cvt = uitofp <2 x i64> %sad to <2 x double>
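
(Why this TODO suggests uitofp -> sitofp: the masked SAD lanes are at most 8 * 1 = 8 and are zero-extended, so the sign bit is never set and the two conversions are equivalent. A signed convert could then be narrowed as in the previous test, avoiding the magic-constant POR/SUBPD/ADDPD expansion that the generic u64 -> f64 lowering produces above.)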