[llvm] [X86] Use shift+add/sub for vXi8 splat multiplies (PR #174110)
Cody Cutler via llvm-commits
llvm-commits at lists.llvm.org
Sun Mar 8 11:34:38 PDT 2026
================
@@ -0,0 +1,1808 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512
+
+;; Tests vXi8 constant-multiply decomposition into shift/add/sub sequences.
+;;
+;; Examples:
+;; 6 = 2^2 + 2^1 = 4 + 2 (or 8 - 2)
+;; 10 = 2^3 + 2^1 = 8 + 2
+;; 12 = 2^3 + 2^2 = 8 + 4 (or 16 - 4)
+;; 18 = 2^4 + 2^1 = 16 + 2
+;; 20 = 2^4 + 2^2 = 16 + 4
+;; 24 = 2^4 + 2^3 = 16 + 8 (or 32 - 8)
+;;
+;; To run this test:
+;; llvm-lit llvm/test/CodeGen/X86/vector-mul-i8-decompose.ll
+;;
+;; To regenerate CHECK lines:
+;; python llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/vector-mul-i8-decompose.ll
+
+;; ============================================================================
+;; v16i8 Tests (128-bit vectors) - Sum of two powers of 2
+;; ============================================================================
+
+; Test multiply by 6 = 4 + 2 = (1 << 2) + (1 << 1)
+define <16 x i8> @mul_v16i8_const6(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const6:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: psllw $2, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const6:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX2-NEXT: vpsllw $2, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const6:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 10 = 8 + 2 = (1 << 3) + (1 << 1)
+define <16 x i8> @mul_v16i8_const10(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const10:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: psllw $3, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const10:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX2-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const10:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 12 = 8 + 4 = (1 << 3) + (1 << 2)
+define <16 x i8> @mul_v16i8_const12(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const12:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $2, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $3, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const12:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const12:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 18 = 16 + 2 = (1 << 4) + (1 << 1)
+define <16 x i8> @mul_v16i8_const18(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const18:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const18:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX2-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const18:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18, i8 18>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 20 = 16 + 4 = (1 << 4) + (1 << 2)
+define <16 x i8> @mul_v16i8_const20(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const20:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $2, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const20:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const20:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20, i8 20>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 24 = 16 + 8 = (1 << 4) + (1 << 3)
+define <16 x i8> @mul_v16i8_const24(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const24:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $3, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const24:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $3, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const24:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 34 = 32 + 2 = (1 << 5) + (1 << 1)
+define <16 x i8> @mul_v16i8_const34(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const34:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: psllw $5, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const34:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const34:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34, i8 34>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 36 = 32 + 4 = (1 << 5) + (1 << 2)
+define <16 x i8> @mul_v16i8_const36(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const36:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $2, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $5, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const36:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const36:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36, i8 36>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 40 = 32 + 8 = (1 << 5) + (1 << 3)
+define <16 x i8> @mul_v16i8_const40(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const40:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $3, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $5, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const40:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $3, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const40:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40, i8 40>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 48 = 32 + 16 = (1 << 5) + (1 << 4)
+define <16 x i8> @mul_v16i8_const48(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const48:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $5, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const48:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const48:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>
+ ret <16 x i8> %result
+}
+
+;; ============================================================================
+;; v16i8 Tests (128-bit vectors) - Difference of two powers of 2
+;; ============================================================================
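As a hedged sketch of what the difference form would look like at the scalar IR level (hypothetical function, not part of the patch). Note that the CHECK lines below show the backend still selects the sum form for these constants, e.g. 4 + 2 rather than 8 - 2 for 6:

define i8 @mul6_diff_sketch(i8 %x) {
  %s3 = shl i8 %x, 3      ; x * 8
  %s1 = shl i8 %x, 1      ; x * 2
  %r  = sub i8 %s3, %s1   ; x * 6 = x * (8 - 2)
  ret i8 %r
}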
+
+; Test multiply by 6 = 8 - 2 = (1 << 3) - (1 << 1)
+define <16 x i8> @mul_v16i8_const6_diff(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const6_diff:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: psllw $2, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const6_diff:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX2-NEXT: vpsllw $2, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const6_diff:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 12 = 16 - 4 = (1 << 4) - (1 << 2)
+define <16 x i8> @mul_v16i8_const12_diff(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const12_diff:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $2, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $3, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const12_diff:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const12_diff:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12, i8 12>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 24 = 32 - 8 = (1 << 5) - (1 << 3)
+define <16 x i8> @mul_v16i8_const24_diff(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const24_diff:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $3, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $4, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const24_diff:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $3, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const24_diff:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 48 = 64 - 16 = (1 << 6) - (1 << 4)
+define <16 x i8> @mul_v16i8_const48_diff(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const48_diff:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $4, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $5, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const48_diff:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const48_diff:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48, i8 48>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 96 = 128 - 32 = (1 << 7) - (1 << 5)
+define <16 x i8> @mul_v16i8_const96(<16 x i8> %a) nounwind {
+; SSE2-LABEL: mul_v16i8_const96:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psllw $5, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: psllw $6, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: mul_v16i8_const96:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $5, %xmm0, %xmm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpsllw $6, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: mul_v16i8_const96:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $5, %xmm0, %xmm1
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $6, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+ %result = mul <16 x i8> %a, <i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96, i8 96>
+ ret <16 x i8> %result
+}
+
+; Test multiply by 160 = 128 + 32 = (1 << 7) + (1 << 5)
----------------
grodranlorth wrote:
Thanks, this was a surprising one. The optimization does decompose this case as an add, but it looks like some later pass converts it to 0 - (2^5 + 2^6) = -96, which is congruent to 160 (mod 256).
https://github.com/llvm/llvm-project/pull/174110