[llvm] c8aaa5d - [X86][AVX512] Add tests showing failure to retain pmuldq broadcast loads on 32-bit targets

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 11 11:30:18 PDT 2022


Author: Simon Pilgrim
Date: 2022-06-11T19:30:00+01:00
New Revision: c8aaa5d9c3a9540afc7c3dc2f5c0b95a7c7897a9

URL: https://github.com/llvm/llvm-project/commit/c8aaa5d9c3a9540afc7c3dc2f5c0b95a7c7897a9
DIFF: https://github.com/llvm/llvm-project/commit/c8aaa5d9c3a9540afc7c3dc2f5c0b95a7c7897a9.diff

LOG: [X86][AVX512] Add tests showing failure to retain pmuldq broadcast loads on 32-bit targets

Noticed while investigating the build-vector issues in D127115
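
For reference, the new "_buildvector" tests differ from the existing rmb
tests only in how the splat of the loaded i64 is written in IR. A minimal
sketch of the two forms (the shufflevector form is assumed to match the
existing test_mask_mul_epi32_rmb* pattern in these files; value names are
illustrative, shown here on the 128-bit case):

  ; Splat via insertelement + shufflevector - the broadcast load is
  ; expected to fold into vpmuldq on both 32-bit and 64-bit targets:
  %q   = load i64, i64* %ptr_b
  %v0  = insertelement <2 x i64> undef, i64 %q, i32 0
  %b64 = shufflevector <2 x i64> %v0, <2 x i64> undef, <2 x i32> zeroinitializer

  ; Splat via an explicit build vector (the form added below), which
  ; currently loses the fold on 32-bit targets:
  %w0     = insertelement <2 x i64> undef, i64 %q, i32 0
  %b64.bv = insertelement <2 x i64> %w0, i64 %q, i32 1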

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
    llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index b86a6b74e0387..f171cb8bb6577 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -5451,6 +5451,37 @@ define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64>
   ret < 8 x i64> %res
 }
 
+define <8 x i64> @test_mask_mul_epi32_rmbk_buildvector(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
+; X86-LABEL: test_mask_mul_epi32_rmbk_buildvector:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpbroadcastd (%eax), %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x58,0x10]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmuldq %zmm2, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x28,0xca]
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_mask_mul_epi32_rmbk_buildvector:
+; X64:       ## %bb.0:
+; X64-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x59,0x28,0x0f]
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement < 8 x i64> undef, i64 %q, i32 0
+  %vecinit.i1 = insertelement < 8 x i64> %vecinit.i, i64 %q, i32 1
+  %vecinit.i2 = insertelement < 8 x i64> %vecinit.i1, i64 %q, i32 2
+  %vecinit.i3 = insertelement < 8 x i64> %vecinit.i2, i64 %q, i32 3
+  %vecinit.i4 = insertelement < 8 x i64> %vecinit.i3, i64 %q, i32 4
+  %vecinit.i5 = insertelement < 8 x i64> %vecinit.i4, i64 %q, i32 5
+  %vecinit.i6 = insertelement < 8 x i64> %vecinit.i5, i64 %q, i32 6
+  %b64 = insertelement < 8 x i64> %vecinit.i6, i64 %q, i32 7
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
+  ret < 8 x i64> %res
+}
+
 define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
 ; X86-LABEL: test_mask_mul_epi32_rmbkz:
 ; X86:       ## %bb.0:
@@ -5473,6 +5504,35 @@ define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask
   ret < 8 x i64> %res
 }
 
+define <8 x i64> @test_mask_mul_epi32_rmbkz_buildvector(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
+; X86-LABEL: test_mask_mul_epi32_rmbkz_buildvector:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpbroadcastd (%eax), %zmm1 ## encoding: [0x62,0xf2,0x7d,0x48,0x58,0x08]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x28,0xc1]
+; X86-NEXT:    retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_mask_mul_epi32_rmbkz_buildvector:
+; X64:       ## %bb.0:
+; X64-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xd9,0x28,0x07]
+; X64-NEXT:    retq ## encoding: [0xc3]
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement < 8 x i64> undef, i64 %q, i32 0
+  %vecinit.i1 = insertelement < 8 x i64> %vecinit.i, i64 %q, i32 1
+  %vecinit.i2 = insertelement < 8 x i64> %vecinit.i1, i64 %q, i32 2
+  %vecinit.i3 = insertelement < 8 x i64> %vecinit.i2, i64 %q, i32 3
+  %vecinit.i4 = insertelement < 8 x i64> %vecinit.i3, i64 %q, i32 4
+  %vecinit.i5 = insertelement < 8 x i64> %vecinit.i4, i64 %q, i32 5
+  %vecinit.i6 = insertelement < 8 x i64> %vecinit.i5, i64 %q, i32 6
+  %b64 = insertelement < 8 x i64> %vecinit.i6, i64 %q, i32 7
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
+  ret < 8 x i64> %res
+}
+
 declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
 
 define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {

diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 82075ae212d80..3014c7b786312 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -11155,6 +11155,31 @@ define < 2 x i64> @test_mask_mul_epi32_rmbk_128(< 4 x i32> %a, i64* %ptr_b, < 2
   ret < 2 x i64> %res
 }
 
+define < 2 x i64> @test_mask_mul_epi32_rmbk_128_buildvector(< 4 x i32> %a, i64* %ptr_b, < 2 x i64> %passThru, i8 %mask) {
+; X86-LABEL: test_mask_mul_epi32_rmbk_128_buildvector:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpbroadcastd (%eax), %xmm2 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x58,0x10]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmuldq %xmm2, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x28,0xca]
+; X86-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_mask_mul_epi32_rmbk_128_buildvector:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vpmuldq (%rdi){1to2}, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0x28,0x0f]
+; X64-NEXT:    vmovdqa %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement < 2 x i64> undef, i64 %q, i32 0
+  %b64 = insertelement < 2 x i64> %vecinit.i, i64 %q, i32 1
+  %b = bitcast < 2 x i64> %b64 to < 4 x i32>
+  %res = call < 2 x i64> @llvm.x86.avx512.mask.pmul.dq.128(< 4 x i32> %a, < 4 x i32> %b, < 2 x i64> %passThru, i8 %mask)
+  ret < 2 x i64> %res
+}
+
 define < 2 x i64> @test_mask_mul_epi32_rmbkz_128(< 4 x i32> %a, i64* %ptr_b, i8 %mask) {
 ; X86-LABEL: test_mask_mul_epi32_rmbkz_128:
 ; X86:       # %bb.0:
@@ -11177,6 +11202,29 @@ define < 2 x i64> @test_mask_mul_epi32_rmbkz_128(< 4 x i32> %a, i64* %ptr_b, i8
   ret < 2 x i64> %res
 }
 
+define < 2 x i64> @test_mask_mul_epi32_rmbkz_128_buildvector(< 4 x i32> %a, i64* %ptr_b, i8 %mask) {
+; X86-LABEL: test_mask_mul_epi32_rmbkz_128_buildvector:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpbroadcastd (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x58,0x08]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x28,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_mask_mul_epi32_rmbkz_128_buildvector:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vpmuldq (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0x28,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement < 2 x i64> undef, i64 %q, i32 0
+  %b64 = insertelement < 2 x i64> %vecinit.i, i64 %q, i32 1
+  %b = bitcast < 2 x i64> %b64 to < 4 x i32>
+  %res = call < 2 x i64> @llvm.x86.avx512.mask.pmul.dq.128(< 4 x i32> %a, < 4 x i32> %b, < 2 x i64> zeroinitializer, i8 %mask)
+  ret < 2 x i64> %res
+}
+
 declare < 2 x i64> @llvm.x86.avx512.mask.pmul.dq.128(< 4 x i32>, < 4 x i32>, < 2 x i64>, i8)
 
 define < 4 x i64> @test_mask_mul_epi32_rr_256(< 8 x i32> %a, < 8 x i32> %b) {
@@ -11323,6 +11371,33 @@ define < 4 x i64> @test_mask_mul_epi32_rmbk_256(< 8 x i32> %a, i64* %ptr_b, < 4
   ret < 4 x i64> %res
 }
 
+define < 4 x i64> @test_mask_mul_epi32_rmbk_256_buildvector(< 8 x i32> %a, i64* %ptr_b, < 4 x i64> %passThru, i8 %mask) {
+; X86-LABEL: test_mask_mul_epi32_rmbk_256_buildvector:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpbroadcastd (%eax), %ymm2 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x58,0x10]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmuldq %ymm2, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x28,0xca]
+; X86-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_mask_mul_epi32_rmbk_256_buildvector:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vpmuldq (%rdi){1to4}, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x39,0x28,0x0f]
+; X64-NEXT:    vmovdqa %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement < 4 x i64> undef, i64 %q, i32 0
+  %vecinit.i1 = insertelement < 4 x i64> %vecinit.i, i64 %q, i32 1
+  %vecinit.i2 = insertelement < 4 x i64> %vecinit.i1, i64 %q, i32 2
+  %b64 = insertelement < 4 x i64> %vecinit.i2, i64 %q, i32 3
+  %b = bitcast < 4 x i64> %b64 to < 8 x i32>
+  %res = call < 4 x i64> @llvm.x86.avx512.mask.pmul.dq.256(< 8 x i32> %a, < 8 x i32> %b, < 4 x i64> %passThru, i8 %mask)
+  ret < 4 x i64> %res
+}
+
 define < 4 x i64> @test_mask_mul_epi32_rmbkz_256(< 8 x i32> %a, i64* %ptr_b, i8 %mask) {
 ; X86-LABEL: test_mask_mul_epi32_rmbkz_256:
 ; X86:       # %bb.0:
@@ -11345,6 +11420,31 @@ define < 4 x i64> @test_mask_mul_epi32_rmbkz_256(< 8 x i32> %a, i64* %ptr_b, i8
   ret < 4 x i64> %res
 }
 
+define < 4 x i64> @test_mask_mul_epi32_rmbkz_256_buildvector(< 8 x i32> %a, i64* %ptr_b, i8 %mask) {
+; X86-LABEL: test_mask_mul_epi32_rmbkz_256_buildvector:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT:    vpbroadcastd (%eax), %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x58,0x08]
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT:    vpmuldq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x28,0xc1]
+; X86-NEXT:    retl # encoding: [0xc3]
+;
+; X64-LABEL: test_mask_mul_epi32_rmbkz_256_buildvector:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT:    vpmuldq (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xb9,0x28,0x07]
+; X64-NEXT:    retq # encoding: [0xc3]
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement < 4 x i64> undef, i64 %q, i32 0
+  %vecinit.i1 = insertelement < 4 x i64> %vecinit.i, i64 %q, i32 1
+  %vecinit.i2 = insertelement < 4 x i64> %vecinit.i1, i64 %q, i32 2
+  %b64 = insertelement < 4 x i64> %vecinit.i2, i64 %q, i32 3
+  %b = bitcast < 4 x i64> %b64 to < 8 x i32>
+  %res = call < 4 x i64> @llvm.x86.avx512.mask.pmul.dq.256(< 8 x i32> %a, < 8 x i32> %b, < 4 x i64> zeroinitializer, i8 %mask)
+  ret < 4 x i64> %res
+}
+
 declare < 4 x i64> @llvm.x86.avx512.mask.pmul.dq.256(< 8 x i32>, < 8 x i32>, < 4 x i64>, i8)
 
 define < 2 x i64> @test_mask_mul_epu32_rr_128(< 4 x i32> %a, < 4 x i32> %b) {

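The X86 check lines above capture the missed fold: the splat is
materialized with a separate vpbroadcastd before the multiply instead of
being folded into it. A sketch of the gap, assuming the desired 32-bit
form mirrors the existing rmb tests and the X64 output:

  ; Current 32-bit output (512-bit case, from the tests above):
  ;   vpbroadcastd (%eax), %zmm2
  ;   vpmuldq %zmm2, %zmm0, %zmm1 {%k1}
  ; Desired, keeping the broadcast load on the multiply:
  ;   vpmuldq (%eax){1to8}, %zmm0, %zmm1 {%k1}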