[llvm] [X86] Recognise VPMADD52L pattern with AVX512IFMA/AVXIFMA (#153787) (PR #156714)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 9 01:50:41 PDT 2025
================
@@ -0,0 +1,559 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avxifma | FileCheck %s --check-prefixes=X64,AVX
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512ifma | FileCheck %s --check-prefixes=X64,AVX512,AVX512-NOVL
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=X64,AVX512,AVX512VL
+
+; 67108863 == (1 << 26) - 1
+; 4503599627370496 == (1 << 52)
+; 4503599627370495 == (1 << 52) - 1
+
+define <8 x i64> @test_512_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) {
+; AVX-LABEL: test_512_combine:
+; AVX: # %bb.0:
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [67108863,67108863,67108863,67108863]
+; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX-NEXT: vpand %ymm6, %ymm0, %ymm0
+; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4
+; AVX-NEXT: vpand %ymm6, %ymm3, %ymm0
+; AVX-NEXT: vpand %ymm6, %ymm1, %ymm1
+; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5
+; AVX-NEXT: vmovdqa %ymm4, %ymm0
+; AVX-NEXT: vmovdqa %ymm5, %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_512_combine:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm3 = [67108863,67108863,67108863,67108863,67108863,67108863,67108863,67108863]
+; AVX512-NEXT: vpandq %zmm3, %zmm0, %zmm0
+; AVX512-NEXT: vpandq %zmm3, %zmm1, %zmm1
+; AVX512-NEXT: vpmadd52luq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %x_masked = and <8 x i64> %x, splat (i64 67108863)
+ %y_masked = and <8 x i64> %y, splat (i64 67108863)
+ %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked
+ %res = add nuw nsw <8 x i64> %mul, %z
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_512_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) {
+; AVX-LABEL: test_512_combine_v2:
+; AVX: # %bb.0:
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [3,3,3,3]
+; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1125899906842623,1125899906842623,1125899906842623,1125899906842623]
+; AVX-NEXT: vpand %ymm7, %ymm0, %ymm0
+; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4
+; AVX-NEXT: vpand %ymm6, %ymm3, %ymm0
+; AVX-NEXT: vpand %ymm7, %ymm1, %ymm1
+; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5
+; AVX-NEXT: vmovdqa %ymm4, %ymm0
+; AVX-NEXT: vmovdqa %ymm5, %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_512_combine_v2:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1
+; AVX512-NEXT: vpmadd52luq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %x_masked = and <8 x i64> %x, splat (i64 1125899906842623) ; (1 << 50) - 1
+ %y_masked = and <8 x i64> %y, splat (i64 3)
+ %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked
+ %res = add nuw nsw <8 x i64> %mul, %z
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_512_no_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) {
+; AVX-LABEL: test_512_no_combine:
+; AVX: # %bb.0:
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
+; AVX-NEXT: vpand %ymm6, %ymm0, %ymm7
+; AVX-NEXT: vpand %ymm6, %ymm1, %ymm8
+; AVX-NEXT: vpand %ymm6, %ymm2, %ymm9
+; AVX-NEXT: vpand %ymm6, %ymm3, %ymm6
+; AVX-NEXT: vpsrlq $32, %ymm8, %ymm8
+; AVX-NEXT: vpmuludq %ymm3, %ymm8, %ymm8
+; AVX-NEXT: vpsrlq $32, %ymm6, %ymm6
+; AVX-NEXT: vpmuludq %ymm6, %ymm1, %ymm6
+; AVX-NEXT: vpaddq %ymm6, %ymm8, %ymm6
+; AVX-NEXT: vpsllq $32, %ymm6, %ymm6
+; AVX-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
+; AVX-NEXT: vpsrlq $32, %ymm7, %ymm3
+; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3
+; AVX-NEXT: vpsrlq $32, %ymm9, %ymm7
+; AVX-NEXT: vpmuludq %ymm7, %ymm0, %ymm7
+; AVX-NEXT: vpaddq %ymm3, %ymm7, %ymm3
+; AVX-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0
+; AVX-NEXT: vpaddq %ymm5, %ymm1, %ymm1
+; AVX-NEXT: vpaddq %ymm6, %ymm1, %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_512_no_combine:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm3 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495]
+; AVX512-NEXT: vpandq %zmm3, %zmm0, %zmm4
+; AVX512-NEXT: vpandq %zmm3, %zmm1, %zmm3
+; AVX512-NEXT: vpsrlq $32, %zmm4, %zmm4
+; AVX512-NEXT: vpmuludq %zmm1, %zmm4, %zmm4
+; AVX512-NEXT: vpsrlq $32, %zmm3, %zmm3
+; AVX512-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
+; AVX512-NEXT: vpaddq %zmm4, %zmm3, %zmm3
+; AVX512-NEXT: vpsllq $32, %zmm3, %zmm3
+; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm3, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x_masked = and <8 x i64> %x, splat (i64 4503599627370495)
+ %y_masked = and <8 x i64> %y, splat (i64 4503599627370495)
+ %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked
+ %res = add nuw nsw <8 x i64> %mul, %z
+ ret <8 x i64> %res
+}
+
+define <8 x i64> @test_512_no_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) {
+; AVX-LABEL: test_512_no_combine_v2:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsrlq $32, %ymm1, %ymm6
+; AVX-NEXT: vpmuludq %ymm3, %ymm6, %ymm6
+; AVX-NEXT: vpsrlq $32, %ymm3, %ymm7
+; AVX-NEXT: vpmuludq %ymm7, %ymm1, %ymm7
+; AVX-NEXT: vpaddq %ymm6, %ymm7, %ymm6
+; AVX-NEXT: vpsllq $32, %ymm6, %ymm6
+; AVX-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
+; AVX-NEXT: vpsrlq $32, %ymm0, %ymm3
+; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3
+; AVX-NEXT: vpsrlq $32, %ymm2, %ymm7
+; AVX-NEXT: vpmuludq %ymm7, %ymm0, %ymm7
+; AVX-NEXT: vpaddq %ymm3, %ymm7, %ymm3
+; AVX-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0
+; AVX-NEXT: vpaddq %ymm5, %ymm1, %ymm1
+; AVX-NEXT: vpaddq %ymm6, %ymm1, %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_512_no_combine_v2:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsrlq $32, %zmm0, %zmm3
+; AVX512-NEXT: vpmuludq %zmm1, %zmm3, %zmm3
+; AVX512-NEXT: vpsrlq $32, %zmm1, %zmm4
+; AVX512-NEXT: vpmuludq %zmm4, %zmm0, %zmm4
+; AVX512-NEXT: vpaddq %zmm3, %zmm4, %zmm3
+; AVX512-NEXT: vpsllq $32, %zmm3, %zmm3
+; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm3, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %mul = mul <8 x i64> %x, %y
+ %res = add <8 x i64> %mul, %z
+ ret <8 x i64> %res
+}
+
+define <4 x i64> @test_256_combine(<4 x i64> %x, <4 x i64> %y, <4 x i64> %z) {
+; AVX-LABEL: test_256_combine:
+; AVX: # %bb.0:
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm3 = [67108863,67108863,67108863,67108863]
+; AVX-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm2
+; AVX-NEXT: vmovdqa %ymm2, %ymm0
+; AVX-NEXT: retq
+;
+; AVX512-NOVL-LABEL: test_256_combine:
+; AVX512-NOVL: # %bb.0:
+; AVX512-NOVL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [67108863,67108863,67108863,67108863]
+; AVX512-NOVL-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX512-NOVL-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX512-NOVL-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
+; AVX512-NOVL-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512-NOVL-NEXT: retq
+;
+; AVX512VL-LABEL: test_256_combine:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [67108863,67108863,67108863,67108863]
+; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpmadd52luq %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa %ymm2, %ymm0
+; AVX512VL-NEXT: retq
+ %x_masked = and <4 x i64> %x, splat (i64 67108863)
+ %y_masked = and <4 x i64> %y, splat (i64 67108863)
+ %mul = mul nuw nsw <4 x i64> %x_masked, %y_masked
+ %res = add nuw nsw <4 x i64> %z, %mul
+ ret <4 x i64> %res
+}
+
+define <4 x i64> @test_256_no_combine(<4 x i64> %x, <4 x i64> %y, <4 x i64> %z) {
+; X64-LABEL: test_256_no_combine:
+; X64: # %bb.0:
+; X64-NEXT: vpsrlq $32, %ymm0, %ymm3
+; X64-NEXT: vpmuludq %ymm1, %ymm3, %ymm3
+; X64-NEXT: vpsrlq $32, %ymm1, %ymm4
+; X64-NEXT: vpmuludq %ymm4, %ymm0, %ymm4
+; X64-NEXT: vpaddq %ymm3, %ymm4, %ymm3
+; X64-NEXT: vpsllq $32, %ymm3, %ymm3
+; X64-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X64-NEXT: vpaddq %ymm3, %ymm0, %ymm0
+; X64-NEXT: retq
+ %mul = mul <4 x i64> %x, %y
+ %res = add <4 x i64> %mul, %z
+ ret <4 x i64> %res
+}
+
+define <2 x i64> @test_128_combine(<2 x i64> %x, <2 x i64> %y, <2 x i64> %z) {
+; AVX-LABEL: test_128_combine:
+; AVX: # %bb.0:
+; AVX-NEXT: vpbroadcastq {{.*#+}} xmm3 = [67108863,67108863]
+; AVX-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX-NEXT: {vex} vpmadd52luq %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovdqa %xmm2, %xmm0
+; AVX-NEXT: retq
+;
+; AVX512-NOVL-LABEL: test_128_combine:
+; AVX512-NOVL: # %bb.0:
+; AVX512-NOVL-NEXT: vpbroadcastq {{.*#+}} xmm3 = [67108863,67108863]
+; AVX512-NOVL-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX512-NOVL-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX512-NOVL-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
+; AVX512-NOVL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512-NOVL-NEXT: retq
+;
+; AVX512VL-LABEL: test_128_combine:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpbroadcastq {{.*#+}} xmm3 = [67108863,67108863]
+; AVX512VL-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmadd52luq %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT: vmovdqa %xmm2, %xmm0
+; AVX512VL-NEXT: retq
+ %x_masked = and <2 x i64> %x, splat (i64 67108863)
+ %y_masked = and <2 x i64> %y, splat (i64 67108863)
+ %mul = mul <2 x i64> %x_masked, %y_masked
+ %res = add <2 x i64> %z, %mul
+ ret <2 x i64> %res
+}
+
+; Sanity check that the combine is not applied to scalar <1 x i64> operations
+define <1 x i64> @test_scalar_no_ifma(<1 x i64> %x, <1 x i64> %y, <1 x i64> %z) {
+; X64-LABEL: test_scalar_no_ifma:
+; X64: # %bb.0:
+; X64-NEXT: imulq %rsi, %rdi
+; X64-NEXT: leaq (%rdi,%rdx), %rax
+; X64-NEXT: retq
+ %mul = mul <1 x i64> %x, %y
+ %res = add <1 x i64> %mul, %z
+ ret <1 x i64> %res
+}
+
+define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) {
+ ; 40-bit and 13-bit operands: 40 + 13 = 53 > 52, so the product may not fit in 52 bits and the combine must not fire
+; AVX-LABEL: test_mixed_width_too_wide:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %ymm5, %ymm1
+; AVX-NEXT: vmovaps %ymm4, %ymm0
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_mixed_width_too_wide:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovaps %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %x40 = and <8 x i64> %x, splat (i64 1099511627775)
+ %y13 = and <8 x i64> %y, splat (i64 8191)
+ %mul = mul <8 x i64> %x40, %y13
+ %res = add <8 x i64> %z, %mul
+ ret <8 x i64> %z
----------------
RKSimon wrote:
```
ret <8 x i64> %res
```
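As written, `%res` is unused and the masked multiply/add chain is dead, which is why the generated checks above reduce to plain register moves of `%z`; the negative test never reaches instruction selection with a live multiply. A sketch of the test with the suggested return applied, everything else kept as in the patch:
```
define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) {
  %x40 = and <8 x i64> %x, splat (i64 1099511627775) ; (1 << 40) - 1
  %y13 = and <8 x i64> %y, splat (i64 8191)          ; (1 << 13) - 1
  %mul = mul <8 x i64> %x40, %y13                    ; 40 + 13 = 53 bits, may exceed 52
  %res = add <8 x i64> %z, %mul
  ret <8 x i64> %res                                 ; return %res so the multiply stays live
}
```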
https://github.com/llvm/llvm-project/pull/156714