[llvm] 7365bfb - [X86][GISel] Avoid creating subreg def operands in emitInsertSubreg (#189408)

via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 31 01:05:49 PDT 2026


Author: Jay Foad
Date: 2026-03-31T09:05:44+01:00
New Revision: 7365bfb3e0650918f7272a0817785c6c01d2feda

URL: https://github.com/llvm/llvm-project/commit/7365bfb3e0650918f7272a0817785c6c01d2feda
DIFF: https://github.com/llvm/llvm-project/commit/7365bfb3e0650918f7272a0817785c6c01d2feda.diff

LOG: [X86][GISel] Avoid creating subreg def operands in emitInsertSubreg (#189408)

emitInsertSubreg builds a COPY with a subregister def operand, but these
probably should not be allowed in SSA MIR. Change it to build an
equivalent use of INSERT_SUBREG instead.

Added: 
    

Modified: 
    llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
    llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
    llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
    llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
    llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
index d9830e93f0c1f..19cbb307529d7 100644
--- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
@@ -1437,9 +1437,15 @@ bool X86InstructionSelector::emitInsertSubreg(Register DstReg, Register SrcReg,
     return false;
   }
 
-  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
-      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
-      .addReg(SrcReg);
+  Register ImpDefReg = MRI.createVirtualRegister(DstRC);
+  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::IMPLICIT_DEF),
+          ImpDefReg);
+
+  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::INSERT_SUBREG),
+          DstReg)
+      .addReg(ImpDefReg)
+      .addReg(SrcReg)
+      .addImm(SubIdx);
 
   return true;
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
index 3368ed699a1f8..71808a998beee 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
@@ -1,5 +1,6 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
 --- |
   define void @test_insert_128_idx0() {
     ret void
@@ -20,7 +21,6 @@
 ...
 ---
 name:            test_insert_128_idx0
-# ALL-LABEL: name:  test_insert_128_idx0
 alignment:       16
 legalized:       true
 regBankSelected: true
@@ -28,21 +28,28 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
   - { id: 2, class: vecr }
-# AVX:               %0:vr256 = COPY $ymm0
-# AVX-NEXT:          %1:vr128 = COPY $xmm1
-# AVX-NEXT:          %2:vr256 = VINSERTF128rri %0, %1, 0
-# AVX-NEXT:          $ymm0 = COPY %2
-# AVX-NEXT:          RET 0, implicit $ymm0
 #
-# AVX512VL:          %0:vr256x = COPY $ymm0
-# AVX512VL-NEXT:     %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT:     %2:vr256x = VINSERTF32X4Z256rri %0, %1, 0
-# AVX512VL-NEXT:     $ymm0 = COPY %2
-# AVX512VL-NEXT:     RET 0, implicit $ymm0
 body:             |
   bb.1 (%ir-block.0):
     liveins: $ymm0, $ymm1
 
+    ; AVX-LABEL: name: test_insert_128_idx0
+    ; AVX: liveins: $ymm0, $ymm1
+    ; AVX-NEXT: {{  $}}
+    ; AVX-NEXT: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+    ; AVX-NEXT: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
+    ; AVX-NEXT: [[VINSERTF128rri:%[0-9]+]]:vr256 = VINSERTF128rri [[COPY]], [[COPY1]], 0
+    ; AVX-NEXT: $ymm0 = COPY [[VINSERTF128rri]]
+    ; AVX-NEXT: RET 0, implicit $ymm0
+    ;
+    ; AVX512VL-LABEL: name: test_insert_128_idx0
+    ; AVX512VL: liveins: $ymm0, $ymm1
+    ; AVX512VL-NEXT: {{  $}}
+    ; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+    ; AVX512VL-NEXT: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
+    ; AVX512VL-NEXT: [[VINSERTF32X4Z256rri:%[0-9]+]]:vr256x = VINSERTF32X4Z256rri [[COPY]], [[COPY1]], 0
+    ; AVX512VL-NEXT: $ymm0 = COPY [[VINSERTF32X4Z256rri]]
+    ; AVX512VL-NEXT: RET 0, implicit $ymm0
     %0(<8 x s32>) = COPY $ymm0
     %1(<4 x s32>) = COPY $xmm1
     %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
@@ -52,7 +59,6 @@ body:             |
 ...
 ---
 name:            test_insert_128_idx0_undef
-# ALL-LABEL: name:  test_insert_128_idx0_undef
 alignment:       16
 legalized:       true
 regBankSelected: true
@@ -60,19 +66,28 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
   - { id: 2, class: vecr }
-# AVX:               %1:vr128 = COPY $xmm1
-# AVX-NEXT:          undef %2.sub_xmm:vr256 = COPY %1
-# AVX-NEXT:          $ymm0 = COPY %2
-# AVX-NEXT:          RET 0, implicit $ymm0
 #
-# AVX512VL:          %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT:     undef %2.sub_xmm:vr256x = COPY %1
-# AVX512VL-NEXT:     $ymm0 = COPY %2
-# AVX512VL-NEXT:     RET 0, implicit $ymm0
 body:             |
   bb.1 (%ir-block.0):
     liveins: $ymm0, $ymm1
 
+    ; AVX-LABEL: name: test_insert_128_idx0_undef
+    ; AVX: liveins: $ymm0, $ymm1
+    ; AVX-NEXT: {{  $}}
+    ; AVX-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+    ; AVX-NEXT: [[DEF:%[0-9]+]]:vr256 = IMPLICIT_DEF
+    ; AVX-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr256 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_xmm
+    ; AVX-NEXT: $ymm0 = COPY [[INSERT_SUBREG]]
+    ; AVX-NEXT: RET 0, implicit $ymm0
+    ;
+    ; AVX512VL-LABEL: name: test_insert_128_idx0_undef
+    ; AVX512VL: liveins: $ymm0, $ymm1
+    ; AVX512VL-NEXT: {{  $}}
+    ; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
+    ; AVX512VL-NEXT: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
+    ; AVX512VL-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr256x = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_xmm
+    ; AVX512VL-NEXT: $ymm0 = COPY [[INSERT_SUBREG]]
+    ; AVX512VL-NEXT: RET 0, implicit $ymm0
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>) = COPY $xmm1
     %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
@@ -82,7 +97,6 @@ body:             |
 ...
 ---
 name:            test_insert_128_idx1
-# ALL-LABEL: name:  test_insert_128_idx1
 alignment:       16
 legalized:       true
 regBankSelected: true
@@ -90,21 +104,28 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
   - { id: 2, class: vecr }
-# AVX:               %0:vr256 = COPY $ymm0
-# AVX-NEXT:          %1:vr128 = COPY $xmm1
-# AVX-NEXT:          %2:vr256 = VINSERTF128rri %0, %1, 1
-# AVX-NEXT:          $ymm0 = COPY %2
-# AVX-NEXT:          RET 0, implicit $ymm0
 #
-# AVX512VL:          %0:vr256x = COPY $ymm0
-# AVX512VL-NEXT:     %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT:     %2:vr256x = VINSERTF32X4Z256rri %0, %1, 1
-# AVX512VL-NEXT:     $ymm0 = COPY %2
-# AVX512VL-NEXT:     RET 0, implicit $ymm0
 body:             |
   bb.1 (%ir-block.0):
     liveins: $ymm0, $ymm1
 
+    ; AVX-LABEL: name: test_insert_128_idx1
+    ; AVX: liveins: $ymm0, $ymm1
+    ; AVX-NEXT: {{  $}}
+    ; AVX-NEXT: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+    ; AVX-NEXT: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
+    ; AVX-NEXT: [[VINSERTF128rri:%[0-9]+]]:vr256 = VINSERTF128rri [[COPY]], [[COPY1]], 1
+    ; AVX-NEXT: $ymm0 = COPY [[VINSERTF128rri]]
+    ; AVX-NEXT: RET 0, implicit $ymm0
+    ;
+    ; AVX512VL-LABEL: name: test_insert_128_idx1
+    ; AVX512VL: liveins: $ymm0, $ymm1
+    ; AVX512VL-NEXT: {{  $}}
+    ; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+    ; AVX512VL-NEXT: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
+    ; AVX512VL-NEXT: [[VINSERTF32X4Z256rri:%[0-9]+]]:vr256x = VINSERTF32X4Z256rri [[COPY]], [[COPY1]], 1
+    ; AVX512VL-NEXT: $ymm0 = COPY [[VINSERTF32X4Z256rri]]
+    ; AVX512VL-NEXT: RET 0, implicit $ymm0
     %0(<8 x s32>) = COPY $ymm0
     %1(<4 x s32>) = COPY $xmm1
     %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128
@@ -113,7 +134,6 @@ body:             |
 ...
 ---
 name:            test_insert_128_idx1_undef
-# ALL-LABEL: name:  test_insert_128_idx1_undef
 alignment:       16
 legalized:       true
 regBankSelected: true
@@ -121,21 +141,28 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
   - { id: 2, class: vecr }
-# AVX:               %0:vr256 = IMPLICIT_DEF
-# AVX-NEXT:          %1:vr128 = COPY $xmm1
-# AVX-NEXT:          %2:vr256 = VINSERTF128rri %0, %1, 1
-# AVX-NEXT:          $ymm0 = COPY %2
-# AVX-NEXT:          RET 0, implicit $ymm0
 #
-# AVX512VL:          %0:vr256x = IMPLICIT_DEF
-# AVX512VL-NEXT:     %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT:     %2:vr256x = VINSERTF32X4Z256rri %0, %1, 1
-# AVX512VL-NEXT:     $ymm0 = COPY %2
-# AVX512VL-NEXT:     RET 0, implicit $ymm0
 body:             |
   bb.1 (%ir-block.0):
     liveins: $ymm0, $ymm1
 
+    ; AVX-LABEL: name: test_insert_128_idx1_undef
+    ; AVX: liveins: $ymm0, $ymm1
+    ; AVX-NEXT: {{  $}}
+    ; AVX-NEXT: [[DEF:%[0-9]+]]:vr256 = IMPLICIT_DEF
+    ; AVX-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY $xmm1
+    ; AVX-NEXT: [[VINSERTF128rri:%[0-9]+]]:vr256 = VINSERTF128rri [[DEF]], [[COPY]], 1
+    ; AVX-NEXT: $ymm0 = COPY [[VINSERTF128rri]]
+    ; AVX-NEXT: RET 0, implicit $ymm0
+    ;
+    ; AVX512VL-LABEL: name: test_insert_128_idx1_undef
+    ; AVX512VL: liveins: $ymm0, $ymm1
+    ; AVX512VL-NEXT: {{  $}}
+    ; AVX512VL-NEXT: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
+    ; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
+    ; AVX512VL-NEXT: [[VINSERTF32X4Z256rri:%[0-9]+]]:vr256x = VINSERTF32X4Z256rri [[DEF]], [[COPY]], 1
+    ; AVX512VL-NEXT: $ymm0 = COPY [[VINSERTF32X4Z256rri]]
+    ; AVX512VL-NEXT: RET 0, implicit $ymm0
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>) = COPY $xmm1
     %2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
index 6fb59df0736da..1a74cf0306863 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
@@ -49,11 +49,13 @@ body:             |
     liveins: $zmm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_128_idx0
-    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 0
-    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $zmm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; ALL-NEXT: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
+    ; ALL-NEXT: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 0
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF32X4Zrri]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<4 x s32>) = COPY $xmm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
@@ -75,10 +77,13 @@ body:             |
     liveins: $ymm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_128_idx0_undef
-    ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: undef %2.sub_xmm:vr512 = COPY [[COPY]]
-    ; ALL: $zmm0 = COPY %2
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $ymm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
+    ; ALL-NEXT: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
+    ; ALL-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr512 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_xmm
+    ; ALL-NEXT: $zmm0 = COPY [[INSERT_SUBREG]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>) = COPY $xmm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
@@ -100,11 +105,13 @@ body:             |
     liveins: $ymm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_128_idx1
-    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $ymm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; ALL-NEXT: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
+    ; ALL-NEXT: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 1
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF32X4Zrri]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<4 x s32>) = COPY $xmm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
@@ -125,11 +132,13 @@ body:             |
     liveins: $ymm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_128_idx1_undef
-    ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
-    ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[DEF]], [[COPY]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $ymm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
+    ; ALL-NEXT: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[DEF]], [[COPY]], 1
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF32X4Zrri]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>) = COPY $xmm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
@@ -150,11 +159,13 @@ body:             |
     liveins: $zmm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_256_idx0
-    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 0
-    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $zmm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; ALL-NEXT: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
+    ; ALL-NEXT: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 0
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF64X4Zrri]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<8 x s32>) = COPY $ymm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
@@ -176,10 +187,13 @@ body:             |
     liveins: $ymm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_256_idx0_undef
-    ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: undef %2.sub_ymm:vr512 = COPY [[COPY]]
-    ; ALL: $zmm0 = COPY %2
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $ymm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
+    ; ALL-NEXT: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
+    ; ALL-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr512 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_ymm
+    ; ALL-NEXT: $zmm0 = COPY [[INSERT_SUBREG]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = COPY $ymm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
@@ -201,11 +215,13 @@ body:             |
     liveins: $ymm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_256_idx1
-    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
-    ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $ymm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+    ; ALL-NEXT: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
+    ; ALL-NEXT: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 1
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF64X4Zrri]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<8 x s32>) = COPY $ymm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256
@@ -226,11 +242,13 @@ body:             |
     liveins: $ymm0, $ymm1
 
     ; ALL-LABEL: name: test_insert_256_idx1_undef
-    ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
-    ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[DEF]], [[COPY]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
-    ; ALL: RET 0, implicit $ymm0
+    ; ALL: liveins: $ymm0, $ymm1
+    ; ALL-NEXT: {{  $}}
+    ; ALL-NEXT: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
+    ; ALL-NEXT: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
+    ; ALL-NEXT: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[DEF]], [[COPY]], 1
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF64X4Zrri]]
+    ; ALL-NEXT: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = COPY $ymm1
     %2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
index 83ce6eb0b17be..cd728faaf9e8a 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
@@ -22,16 +22,19 @@ body:             |
 
     ; AVX-LABEL: name: test_merge
     ; AVX: [[DEF:%[0-9]+]]:vr128 = IMPLICIT_DEF
-    ; AVX: undef %2.sub_xmm:vr256 = COPY [[DEF]]
-    ; AVX: [[VINSERTF128rri:%[0-9]+]]:vr256 = VINSERTF128rri %2, [[DEF]], 1
-    ; AVX: $ymm0 = COPY [[VINSERTF128rri]]
-    ; AVX: RET 0, implicit $ymm0
+    ; AVX-NEXT: [[DEF1:%[0-9]+]]:vr256 = IMPLICIT_DEF
+    ; AVX-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr256 = INSERT_SUBREG [[DEF1]], [[DEF]], %subreg.sub_xmm
+    ; AVX-NEXT: [[VINSERTF128rri:%[0-9]+]]:vr256 = VINSERTF128rri [[INSERT_SUBREG]], [[DEF]], 1
+    ; AVX-NEXT: $ymm0 = COPY [[VINSERTF128rri]]
+    ; AVX-NEXT: RET 0, implicit $ymm0
+    ;
     ; AVX512VL-LABEL: name: test_merge
     ; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
-    ; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]]
-    ; AVX512VL: [[VINSERTF32X4Z256rri:%[0-9]+]]:vr256x = VINSERTF32X4Z256rri %2, [[DEF]], 1
-    ; AVX512VL: $ymm0 = COPY [[VINSERTF32X4Z256rri]]
-    ; AVX512VL: RET 0, implicit $ymm0
+    ; AVX512VL-NEXT: [[DEF1:%[0-9]+]]:vr256x = IMPLICIT_DEF
+    ; AVX512VL-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr256x = INSERT_SUBREG [[DEF1]], [[DEF]], %subreg.sub_xmm
+    ; AVX512VL-NEXT: [[VINSERTF32X4Z256rri:%[0-9]+]]:vr256x = VINSERTF32X4Z256rri [[INSERT_SUBREG]], [[DEF]], 1
+    ; AVX512VL-NEXT: $ymm0 = COPY [[VINSERTF32X4Z256rri]]
+    ; AVX512VL-NEXT: RET 0, implicit $ymm0
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>)
     $ymm0 = COPY %1(<8 x s32>)

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
index d8e3c3aea262b..3f3ee17eb934d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
@@ -23,12 +23,13 @@ body:             |
 
     ; ALL-LABEL: name: test_merge_v128
     ; ALL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
-    ; ALL: undef %2.sub_xmm:vr512 = COPY [[DEF]]
-    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri %2, [[DEF]], 1
-    ; ALL: [[VINSERTF32X4Zrri1:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri]], [[DEF]], 2
-    ; ALL: [[VINSERTF32X4Zrri2:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri1]], [[DEF]], 3
-    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri2]]
-    ; ALL: RET 0, implicit $zmm0
+    ; ALL-NEXT: [[DEF1:%[0-9]+]]:vr512 = IMPLICIT_DEF
+    ; ALL-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr512 = INSERT_SUBREG [[DEF1]], [[DEF]], %subreg.sub_xmm
+    ; ALL-NEXT: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[INSERT_SUBREG]], [[DEF]], 1
+    ; ALL-NEXT: [[VINSERTF32X4Zrri1:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri]], [[DEF]], 2
+    ; ALL-NEXT: [[VINSERTF32X4Zrri2:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri1]], [[DEF]], 3
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF32X4Zrri2]]
+    ; ALL-NEXT: RET 0, implicit $zmm0
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
     $zmm0 = COPY %1(<16 x s32>)
@@ -48,10 +49,11 @@ body:             |
 
     ; ALL-LABEL: name: test_merge_v256
     ; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
-    ; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]]
-    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri %2, [[DEF]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
-    ; ALL: RET 0, implicit $zmm0
+    ; ALL-NEXT: [[DEF1:%[0-9]+]]:vr512 = IMPLICIT_DEF
+    ; ALL-NEXT: [[INSERT_SUBREG:%[0-9]+]]:vr512 = INSERT_SUBREG [[DEF1]], [[DEF]], %subreg.sub_ymm
+    ; ALL-NEXT: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[INSERT_SUBREG]], [[DEF]], 1
+    ; ALL-NEXT: $zmm0 = COPY [[VINSERTF64X4Zrri]]
+    ; ALL-NEXT: RET 0, implicit $zmm0
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_CONCAT_VECTORS %0(<8 x s32>), %0(<8 x s32>)
     $zmm0 = COPY %1(<16 x s32>)


        


More information about the llvm-commits mailing list