[llvm] a84c084 - [X86][SSE] Add combine-pmadd.ll for PMADDWD/VPMADDUBSW combines
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 2 02:51:10 PDT 2021
Author: Simon Pilgrim
Date: 2021-09-02T10:48:49+01:00
New Revision: a84c084122cbaa89839f535b354dd64518a6690a
URL: https://github.com/llvm/llvm-project/commit/a84c084122cbaa89839f535b354dd64518a6690a
DIFF: https://github.com/llvm/llvm-project/commit/a84c084122cbaa89839f535b354dd64518a6690a.diff
LOG: [X86][SSE] Add combine-pmadd.ll for PMADDWD/VPMADDUBSW combines
Pre-commit for D108522 to show failure to fold multiply by zero operands
Added:
llvm/test/CodeGen/X86/combine-pmadd.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-pmadd.ll b/llvm/test/CodeGen/X86/combine-pmadd.ll
new file mode 100644
index 000000000000..f84e2e8c192d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-pmadd.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
+
+declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
+
+define <4 x i32> @combine_pmaddwd_zero(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_pmaddwd_zero:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pmaddwd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pmaddwd_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> zeroinitializer)
+ ret <4 x i32> %1
+}
+
+define <4 x i32> @combine_pmaddwd_zero_commute(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_pmaddwd_zero_commute:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pmaddwd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pmaddwd_zero_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpmaddwd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> zeroinitializer, <8 x i16> %a0)
+ ret <4 x i32> %1
+}
+
+define <8 x i16> @combine_pmaddubsw_zero(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: combine_pmaddubsw_zero:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pmaddubsw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pmaddubsw_zero:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> zeroinitializer)
+ ret <8 x i16> %1
+}
+
+define <8 x i16> @combine_pmaddubsw_zero_commute(<16 x i8> %a0, <16 x i8> %a1) {
+; SSE-LABEL: combine_pmaddubsw_zero_commute:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pmaddubsw %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pmaddubsw_zero_commute:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> zeroinitializer, <16 x i8> %a0)
+ ret <8 x i16> %1
+}
+
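
As the log message notes, these tests pre-commit the missing fold for D108522: the CHECK lines above show that pmaddwd/pmaddubsw are still emitted even though one multiplicand is all zeros. Since every output lane of pmaddwd is a[2i]*b[2i] + a[2i+1]*b[2i+1], a zero operand makes the whole result zero, and the same holds for the saturating pmaddubsw case. A minimal LLVM IR sketch of the codegen these tests should collapse to once the fold lands (function name is illustrative only, not part of the commit):

; Before the fold (as in combine_pmaddwd_zero above):
;   %r = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> zeroinitializer)
; After the fold the result is known to be zero, so no pmaddwd (or the
; preceding pxor) should be emitted at all; the pmaddubsw tests fold the
; same way.
define <4 x i32> @expected_pmaddwd_zero_fold(<8 x i16> %a0) {
  ret <4 x i32> zeroinitializer
}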