[llvm] [X86] Blocklist instructions that are unsafe for masked-load folding. (PR #178888)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 30 06:09:03 PST 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-x86

Author: None (azwolski)

<details>
<summary>Changes</summary>

This PR blocklists instructions that are unsafe for masked-load folding.

Folding a masked load into an instruction that uses the same mask is safe only if every active destination element reads only from source elements that are also active under that mask. The instructions blocklisted here perform element rearrangement or broadcasting, so an active destination element may read from a masked-off source element.
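
To make the condition concrete, here is a minimal C++ sketch (a toy model, not LLVM code) of a zero-masking op applied over a made-up lane-reversing shuffle; `Res1`/`Res2` correspond to the folded and unfolded forms in the examples below:

```cpp
#include <array>
#include <cstdio>

using Vec = std::array<int, 4>;

// Zero-masking select: lane i keeps Src[i] if mask bit i is set, else 0.
static Vec maskZ(const Vec &Src, unsigned K) {
  Vec R{};
  for (int I = 0; I < 4; ++I)
    R[I] = ((K >> I) & 1) ? Src[I] : 0;
  return R;
}

// Toy element-rearranging op: result lane i reads source lane 3 - i.
static Vec reverseLanes(const Vec &Src) {
  return {Src[3], Src[2], Src[1], Src[0]};
}

int main() {
  Vec E = {10, 20, 30, 40};
  unsigned K = 0b0010; // only lane 1 is active

  // Folded form: the op reads the full in-memory vector E.
  Vec Res1 = maskZ(reverseLanes(E), K);           // lane 1 = E[2] = 30
  // Unfolded form: the op reads the result of the masked load instead.
  Vec Res2 = maskZ(reverseLanes(maskZ(E, K)), K); // lane 1 = 0, E[2] was masked off

  std::printf("res1[1]=%d res2[1]=%d\n", Res1[1], Res2[1]); // 30 vs. 0: folding changed the result
  return 0;
}
```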

VPERMILPD and VPERMILPS are safe only in the rrk form; the rik form needs to be blocklisted. In the rrk form, the foldable (memory) operand is the shuffle control, while in the rik form it is the data being shuffled. This is also why VPSHUFB is safe to fold, while shuffles such as VSHUFPS, whose memory operand is data, are not.
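
As a sketch of that distinction (a simplified per-128-bit-lane model of the two VPERMILPS forms, with function names invented here):

```cpp
#include <array>

using Vec = std::array<unsigned, 4>;

// rrk form: the foldable third operand is the shuffle control. Result lane i
// reads only lane i of the control, so with matching masks an active result
// lane never depends on a masked-off lane of the folded operand -- safe.
Vec permilpsRRK(const Vec &Data, const Vec &Ctrl) {
  Vec R{};
  for (int I = 0; I < 4; ++I)
    R[I] = Data[Ctrl[I] & 3];
  return R;
}

// rik form: the foldable second operand is the data. Result lane i reads
// data lane (Imm8 >> 2*i) & 3, which may be a masked-off lane under the
// same mask -- unsafe, so this form is blocklisted.
Vec permilpsRIK(const Vec &Data, unsigned Imm8) {
  Vec R{};
  for (int I = 0; I < 4; ++I)
    R[I] = Data[(Imm8 >> (2 * I)) & 3];
  return R;
}
```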

Examples:
```
EVEX.128.66.0F.WIG 67 /r VPACKUSWB xmm1 {k1}{z}, xmm2, xmm3/m128
A: 00010203 7F000001 80000002 DEADBEEF
E: 00000000 00000001 00000002 00000003
D: 11111111 22222222 33333333 44444444
k = 0x0400
Masked_e = 00000000 00000000 00000000 00000000 (vmovdqu8 Masked_e {k}{z}, E)
res1 = 00000000 00000000 00010000 00000000 (vpackuswb D {k}{z}, A, E)
res2 = 00000000 00000000 00000000 00000000 (vpackuswb D {k}{z}, A, Masked_e)

EVEX.128.66.0F38.W0 C4 /r VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst
A: DAA66D2B FFFFFFFC FFFFFFFC D9A0643C
E: 7DDF743F 00000000 5FD99E73 4ED634C9
D: 2629AB38 9E37782F 67BB800F AD66764A
k = 0x0002
Masked_e = 00000000 00000000 00000000 00000000 (vmovdqu32 Masked_e {k}{z}, E)
res1 = 00000000 00000000 00000000 00000000 (vpconflictd D {k}{z}, E)
res2 = 00000000 00000001 00000000 00000000 (vpconflictd D {k}{z}, Masked_e)

EVEX.128.66.0F38.W1 8D /r VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128
A: 00010203 7F000001 80000002 DEADBEEF
E: 00000000 00000001 00000002 00000003
D: 11111111 22222222 33333333 44444444
k = 0x0010
Masked_e = 00000000 00000000 00000002 00000000 (vmovdqu16 Masked_e {k}{z}, E)
res1 = 00000000 00000000 00000001 00000000 (vpermw D {k}{z}, A, E)
res2 = 00000000 00000000 00000000 00000000 (vpermw D {k}{z}, A, Masked_e)

EVEX.128.66.0F38.W0 78 /r VPBROADCASTB xmm1 {k1}{z}, xmm2/m8
E: 7F4A7C15 6E490933 5D4C9659 4C433CE3
D: F63F9D36 97F6E2B2 9432E8E6 FAEE7A3E
k = 0x0002
Masked_e = 00007C00 00000000 00000000 00000000 (vmovdqu8 Masked_e {k}{z}, E)
res1 = 00001500 00000000 00000000 00000000 (vpbroadcastb D {k}{z}, E)
res2 = 00000000 00000000 00000000 00000000 (vpbroadcastb D {k}{z}, Masked_e)
```

Baseline: https://github.com/llvm/llvm-project/pull/178411

---

Patch is 48.51 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/178888.diff


8 Files Affected:

- (modified) llvm/lib/Target/X86/X86InstrFoldTables.cpp (+8) 
- (modified) llvm/lib/Target/X86/X86InstrFoldTables.h (+4) 
- (modified) llvm/lib/Target/X86/X86InstrInfo.cpp (+5) 
- (modified) llvm/test/CodeGen/X86/interleave-load-fold.ll (+10-7) 
- (modified) llvm/test/CodeGen/X86/non-foldable-with-the-same-mask.mir (+88-59) 
- (modified) llvm/test/TableGen/x86-fold-tables.inc (+446) 
- (modified) llvm/utils/TableGen/X86FoldTablesEmitter.cpp (+59) 
- (modified) llvm/utils/TableGen/X86ManualFoldTables.def (+105) 


``````````diff
diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
index 560b8c378ead7..9b22f6bb767c5 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -143,6 +143,14 @@ const X86FoldTableEntry *llvm::lookupFoldTable(unsigned RegOp, unsigned OpNum) {
   return lookupFoldTableImpl(FoldTable, RegOp);
 }
 
+bool llvm::isNonFoldableWithSameMask(unsigned RegOp) {
+  // NonFoldableWithSameMask table stores instruction opcodes that are unsafe
+  // for masked-load folding when the same mask is used.
+  ArrayRef<unsigned> Table(NonFoldableWithSameMaskTable);
+  auto I = llvm::lower_bound(Table, RegOp);
+  return I != Table.end() && *I == RegOp;
+}
+
 const X86FoldTableEntry *llvm::lookupBroadcastFoldTable(unsigned RegOp,
                                                         unsigned OpNum) {
   ArrayRef<X86FoldTableEntry> FoldTable;
diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.h b/llvm/lib/Target/X86/X86InstrFoldTables.h
index 9c5dea48d2273..35a3e993e3f96 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.h
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.h
@@ -44,6 +44,10 @@ const X86FoldTableEntry *lookupTwoAddrFoldTable(unsigned RegOp);
 // operand OpNum.
 const X86FoldTableEntry *lookupFoldTable(unsigned RegOp, unsigned OpNum);
 
+// Check if an instruction is unsafe for masked-load folding when the load
+// and instruction have the same mask.
+bool isNonFoldableWithSameMask(unsigned RegOp);
+
 // Look up the broadcast folding table entry for folding a broadcast with
 // operand OpNum.
 const X86FoldTableEntry *lookupBroadcastFoldTable(unsigned RegOp,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index c99865cc2dfcd..2b6c21d48125a 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -8155,6 +8155,11 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
       MaskReg = Op2.getReg();
 
     if (MaskReg) {
+      // Some instructions are invalid to fold into even with the same mask.
+      // Folding is unsafe if an active destination element may read from a
+      // source element that is masked off.
+      if (isNonFoldableWithSameMask(MI.getOpcode()))
+        return nullptr;
       bool HasSameMask = false;
       for (unsigned I = 1, E = MI.getDesc().getNumOperands(); I < E; ++I) {
         const MachineOperand &Op = MI.getOperand(I);
diff --git a/llvm/test/CodeGen/X86/interleave-load-fold.ll b/llvm/test/CodeGen/X86/interleave-load-fold.ll
index 28f313bf6a0fa..e2430ff5e1c03 100644
--- a/llvm/test/CodeGen/X86/interleave-load-fold.ll
+++ b/llvm/test/CodeGen/X86/interleave-load-fold.ll
@@ -5,8 +5,10 @@ define <16 x i8> @interleave_masked_select(ptr %mask, ptr %src) nounwind {
 ; X64-LABEL: interleave_masked_select:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw (%rdi), %k1
-; X64-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; X64-NEXT:    vpunpcklbw {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; X64-NEXT:    vmovdqu8 (%rsi), %xmm0 {%k1}
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vpunpcklbw {{.*#+}} xmm0 {%k1} {z} = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; X64-NEXT:    retq
   %mask_vec = load <16 x i1>, ptr %mask
   %vec2 = load <16 x i8>, ptr %src
@@ -19,12 +21,13 @@ define <16 x i8> @interleave_masked_select(ptr %mask, ptr %src) nounwind {
 define <16 x i1> @interleave_masked_blend(i16 %mask, ptr %src1, ptr %src2) nounwind {
 ; X64-LABEL: interleave_masked_blend:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa (%rsi), %xmm0
 ; X64-NEXT:    kmovd %edi, %k1
-; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vpunpcklbw {{.*#+}} xmm2 {%k1} {z} = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; X64-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT:    vpcmpeqb %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vmovdqa (%rsi), %xmm0
+; X64-NEXT:    vpblendmb (%rdx), %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpunpcklbw {{.*#+}} xmm1 {%k1} {z} = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; X64-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; X64-NEXT:    vpcmpeqb %xmm0, %xmm1, %xmm0
 ; X64-NEXT:    retq
   %mask_vec = bitcast i16 %mask to <16 x i1>
   %vec1 = load <16 x i8>, ptr %src1
diff --git a/llvm/test/CodeGen/X86/non-foldable-with-the-same-mask.mir b/llvm/test/CodeGen/X86/non-foldable-with-the-same-mask.mir
index 3281218848c0a..95cb460495512 100644
--- a/llvm/test/CodeGen/X86/non-foldable-with-the-same-mask.mir
+++ b/llvm/test/CodeGen/X86/non-foldable-with-the-same-mask.mir
@@ -21,9 +21,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQA32Z128rmkz:%[0-9]+]]:vr128x = VMOVDQA32Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VALIGNDZ128rmik:%[0-9]+]]:vr128 = VALIGNDZ128rmik [[AVX512_128_SET0_]], [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg, 1 :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VALIGNDZ128rmik]]
+    ; CHECK-NEXT: [[VALIGNDZ128rrik:%[0-9]+]]:vr128 = VALIGNDZ128rrik [[AVX512_128_SET0_]], [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQA32Z128rmkz]], 1
+    ; CHECK-NEXT: $xmm0 = COPY [[VALIGNDZ128rrik]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -53,9 +54,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk16wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQU8Z128rmkz:%[0-9]+]]:vr128x = VMOVDQU8Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VPALIGNRZ128rmikz:%[0-9]+]]:vr128 = VPALIGNRZ128rmikz [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg, 4 :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VPALIGNRZ128rmikz]]
+    ; CHECK-NEXT: [[VPALIGNRZ128rrikz:%[0-9]+]]:vr128 = VPALIGNRZ128rrikz [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQU8Z128rmkz]], 4
+    ; CHECK-NEXT: $xmm0 = COPY [[VPALIGNRZ128rrikz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk16wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -84,8 +86,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VBROADCASTSSZ128rmk:%[0-9]+]]:vr128 = VBROADCASTSSZ128rmk [[AVX512_128_SET0_]], [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VBROADCASTSSZ128rmk]]
+    ; CHECK-NEXT: [[VMOVAPSZ128rmkz:%[0-9]+]]:vr128x = VMOVAPSZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VBROADCASTSSZ128rrk:%[0-9]+]]:vr128 = VBROADCASTSSZ128rrk [[AVX512_128_SET0_]], [[COPY]], [[VMOVAPSZ128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VBROADCASTSSZ128rrk]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -113,8 +116,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk2wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VMOVDDUPZ128rmkz:%[0-9]+]]:vr128 = VMOVDDUPZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VMOVDDUPZ128rmkz]]
+    ; CHECK-NEXT: [[VMOVAPDZ128rmkz:%[0-9]+]]:vr128x = VMOVAPDZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VMOVDDUPZ128rrkz:%[0-9]+]]:vr128 = VMOVDDUPZ128rrkz [[COPY]], [[VMOVAPDZ128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VMOVDDUPZ128rrkz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk2wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -142,8 +146,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VMOVSHDUPZ128rmkz:%[0-9]+]]:vr128 = VMOVSHDUPZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VMOVSHDUPZ128rmkz]]
+    ; CHECK-NEXT: [[VMOVAPSZ128rmkz:%[0-9]+]]:vr128x = VMOVAPSZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VMOVSHDUPZ128rrkz:%[0-9]+]]:vr128 = VMOVSHDUPZ128rrkz [[COPY]], [[VMOVAPSZ128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VMOVSHDUPZ128rrkz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -171,8 +176,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VPBROADCASTDZ128rmkz:%[0-9]+]]:vr128 = VPBROADCASTDZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VPBROADCASTDZ128rmkz]]
+    ; CHECK-NEXT: [[VMOVDQA32Z128rmkz:%[0-9]+]]:vr128x = VMOVDQA32Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VPBROADCASTDZ128rrkz:%[0-9]+]]:vr128 = VPBROADCASTDZ128rrkz [[COPY]], [[VMOVDQA32Z128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VPBROADCASTDZ128rrkz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -201,9 +207,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk8wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQU16Z128rmkz:%[0-9]+]]:vr128x = VMOVDQU16Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VDBPSADBWZ128rmikz:%[0-9]+]]:vr128 = VDBPSADBWZ128rmikz [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg, 0 :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VDBPSADBWZ128rmikz]]
+    ; CHECK-NEXT: [[VDBPSADBWZ128rrikz:%[0-9]+]]:vr128 = VDBPSADBWZ128rrikz [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQU16Z128rmkz]], 0
+    ; CHECK-NEXT: $xmm0 = COPY [[VDBPSADBWZ128rrikz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk8wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -233,9 +240,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk16wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQU8Z128rmkz:%[0-9]+]]:vr128x = VMOVDQU8Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VGF2P8AFFINEQBZ128rmikz:%[0-9]+]]:vr128 = VGF2P8AFFINEQBZ128rmikz [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg, 0 :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VGF2P8AFFINEQBZ128rmikz]]
+    ; CHECK-NEXT: [[VGF2P8AFFINEQBZ128rrikz:%[0-9]+]]:vr128 = VGF2P8AFFINEQBZ128rrikz [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQU8Z128rmkz]], 0
+    ; CHECK-NEXT: $xmm0 = COPY [[VGF2P8AFFINEQBZ128rrikz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk16wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -265,9 +273,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk8wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQU16Z128rmkz:%[0-9]+]]:vr128x = VMOVDQU16Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VMPSADBWZ128rmikz:%[0-9]+]]:vr128 = VMPSADBWZ128rmikz [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg, 0 :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VMPSADBWZ128rmikz]]
+    ; CHECK-NEXT: [[VMPSADBWZ128rrikz:%[0-9]+]]:vr128 = VMPSADBWZ128rrikz [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQU16Z128rmkz]], 0
+    ; CHECK-NEXT: $xmm0 = COPY [[VMPSADBWZ128rrikz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk8wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -296,8 +305,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VPCONFLICTDZ128rmk:%[0-9]+]]:vr128 = VPCONFLICTDZ128rmk [[AVX512_128_SET0_]], [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VPCONFLICTDZ128rmk]]
+    ; CHECK-NEXT: [[VMOVDQA32Z128rmkz:%[0-9]+]]:vr128x = VMOVDQA32Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VPCONFLICTDZ128rrk:%[0-9]+]]:vr128 = VPCONFLICTDZ128rrk [[AVX512_128_SET0_]], [[COPY]], [[VMOVDQA32Z128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VPCONFLICTDZ128rrk]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -326,9 +336,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk16wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQU8Z128rmkz:%[0-9]+]]:vr128x = VMOVDQU8Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VPMULTISHIFTQBZ128rmkz:%[0-9]+]]:vr128 = VPMULTISHIFTQBZ128rmkz [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VPMULTISHIFTQBZ128rmkz]]
+    ; CHECK-NEXT: [[VPMULTISHIFTQBZ128rrkz:%[0-9]+]]:vr128 = VPMULTISHIFTQBZ128rrkz [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQU8Z128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VPMULTISHIFTQBZ128rrkz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk16wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -357,8 +368,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VEXPANDPSZ128rmk:%[0-9]+]]:vr128 = VEXPANDPSZ128rmk [[AVX512_128_SET0_]], [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VEXPANDPSZ128rmk]]
+    ; CHECK-NEXT: [[VMOVAPSZ128rmkz:%[0-9]+]]:vr128x = VMOVAPSZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VEXPANDPSZ128rrk:%[0-9]+]]:vr128 = VEXPANDPSZ128rrk [[AVX512_128_SET0_]], [[COPY]], [[VMOVAPSZ128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VEXPANDPSZ128rrk]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -385,11 +397,12 @@ body:             |
     ; CHECK-LABEL: name: test_vinserti32x4_same_mask
     ; CHECK: liveins: $rdi, $k1
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk16wm = COPY $k1
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_256_SET0_:%[0-9]+]]:vr256x = AVX512_256_SET0
     ; CHECK-NEXT: [[AVX512_256_SET0_1:%[0-9]+]]:vr256x = AVX512_256_SET0
-    ; CHECK-NEXT: [[VINSERTI32X4Z256rmikz:%[0-9]+]]:vr256 = VINSERTI32X4Z256rmikz [[COPY]], [[AVX512_256_SET0_1]], $rdi, 1, $noreg, 0, $noreg, 1 :: (load (s128))
-    ; CHECK-NEXT: $ymm0 = COPY [[VINSERTI32X4Z256rmikz]]
+    ; CHECK-NEXT: [[VMOVDQA32Z128rmkz:%[0-9]+]]:vr128x = VMOVDQA32Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VINSERTI32X4Z256rrikz:%[0-9]+]]:vr256 = VINSERTI32X4Z256rrikz [[COPY]], [[AVX512_256_SET0_1]], [[VMOVDQA32Z128rmkz]], 1
+    ; CHECK-NEXT: $ymm0 = COPY [[VINSERTI32X4Z256rrikz]]
     ; CHECK-NEXT: RET 0, $ymm0
     %0:vk4wm = COPY $k1
     %1:vr256x = AVX512_256_SET0
@@ -419,9 +432,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk8wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQU16Z128rmkz:%[0-9]+]]:vr128x = VMOVDQU16Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VPACKSSDWZ128rmk:%[0-9]+]]:vr128 = VPACKSSDWZ128rmk [[AVX512_128_SET0_]], [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VPACKSSDWZ128rmk]]
+    ; CHECK-NEXT: [[VPACKSSDWZ128rrk:%[0-9]+]]:vr128 = VPACKSSDWZ128rrk [[AVX512_128_SET0_]], [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQU16Z128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VPACKSSDWZ128rrk]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk8wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -451,9 +465,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk8wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_256_SET0_:%[0-9]+]]:vr256x = AVX512_256_SET0
+    ; CHECK-NEXT: [[VMOVDQA32Z256rmkz:%[0-9]+]]:vr256x = VMOVDQA32Z256rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s256))
     ; CHECK-NEXT: [[AVX512_256_SET0_1:%[0-9]+]]:vr256x = AVX512_256_SET0
-    ; CHECK-NEXT: [[VPERMDZ256rmk:%[0-9]+]]:vr256 = VPERMDZ256rmk [[AVX512_256_SET0_]], [[COPY]], [[AVX512_256_SET0_1]], $rdi, 1, $noreg, 0, $noreg :: (load (s256))
-    ; CHECK-NEXT: $ymm0 = COPY [[VPERMDZ256rmk]]
+    ; CHECK-NEXT: [[VPERMDZ256rrk:%[0-9]+]]:vr256 = VPERMDZ256rrk [[AVX512_256_SET0_]], [[COPY]], [[AVX512_256_SET0_1]], [[VMOVDQA32Z256rmkz]]
+    ; CHECK-NEXT: $ymm0 = COPY [[VPERMDZ256rrk]]
     ; CHECK-NEXT: RET 0, $ymm0
     %0:vk8wm = COPY $k1
     %1:vr256x = AVX512_256_SET0
@@ -483,9 +498,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_128_SET0_:%[0-9]+]]:vr128x = AVX512_128_SET0
+    ; CHECK-NEXT: [[VMOVDQA32Z128rmkz:%[0-9]+]]:vr128x = VMOVDQA32Z128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
     ; CHECK-NEXT: [[AVX512_128_SET0_1:%[0-9]+]]:vr128x = AVX512_128_SET0
-    ; CHECK-NEXT: [[VPERMI2DZ128rmkz:%[0-9]+]]:vr128x = VPERMI2DZ128rmkz [[AVX512_128_SET0_]], [[COPY]], [[AVX512_128_SET0_1]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VPERMI2DZ128rmkz]]
+    ; CHECK-NEXT: [[VPERMI2DZ128rrkz:%[0-9]+]]:vr128x = VPERMI2DZ128rrkz [[AVX512_128_SET0_]], [[COPY]], [[AVX512_128_SET0_1]], [[VMOVDQA32Z128rmkz]]
+    ; CHECK-NEXT: $xmm0 = COPY [[VPERMI2DZ128rrkz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = AVX512_128_SET0
@@ -512,8 +528,9 @@ body:             |
     ; CHECK: liveins: $rdi, $k1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk4wm = COPY $k1
-    ; CHECK-NEXT: [[VPERMILPSZ128mikz:%[0-9]+]]:vr128 = VPERMILPSZ128mikz [[COPY]], $rdi, 1, $noreg, 0, $noreg, 27 :: (load (s128))
-    ; CHECK-NEXT: $xmm0 = COPY [[VPERMILPSZ128mikz]]
+    ; CHECK-NEXT: [[VMOVAPSZ128rmkz:%[0-9]+]]:vr128x = VMOVAPSZ128rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s128))
+    ; CHECK-NEXT: [[VPERMILPSZ128rikz:%[0-9]+]]:vr128 = VPERMILPSZ128rikz [[COPY]], [[VMOVAPSZ128rmkz]], 27
+    ; CHECK-NEXT: $xmm0 = COPY [[VPERMILPSZ128rikz]]
     ; CHECK-NEXT: RET 0, $xmm0
     %0:vk4wm = COPY $k1
     %1:vr128x = VMOVAPSZ128rmkz %0, $rdi, 1, $noreg, 0, $noreg :: (load (s128))
@@ -541,9 +558,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vk8wm = COPY $k1
     ; CHECK-NEXT: [[AVX512_256_SET0_:%[0-9]+]]:vr256x = AVX512_256_SET0
+    ; CHECK-NEXT: [[VMOVAPSZ256rmkz:%[0-9]+]]:vr256x = VMOVAPSZ256rmkz [[COPY]], $rdi, 1, $noreg, 0, $noreg :: (load (s256))
     ; CHECK-NEXT: [[AVX512_256_SET0_1:%[0-9]+]]:vr256x = AVX512_256_SET0
-    ; CHECK-NEXT: [[VPERMPSZ256rmkz:%[0-9]+]]:vr256 = VPERMPSZ256rmkz [[COPY]], [[AVX512_256_SET0_1]], $rdi, 1, $noreg, 0, $noreg :: (load (s256))
-    ; CHECK-NEXT: $ymm0 = COPY [[VPERMPSZ256rmkz]]
+    ; CHECK-NEXT: [[VPERMPS...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/178888


More information about the llvm-commits mailing list