[llvm] 614a064 - [X86] Add missing immediate qualifier to the (V)INSERT/EXTRACT/PERM2 instruction names (#108593)

via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 15 03:42:17 PDT 2024


Author: Simon Pilgrim
Date: 2024-09-15T11:42:13+01:00
New Revision: 614a064cac4f8d0e9c53fafd7876a71be84b8610

URL: https://github.com/llvm/llvm-project/commit/614a064cac4f8d0e9c53fafd7876a71be84b8610
DIFF: https://github.com/llvm/llvm-project/commit/614a064cac4f8d0e9c53fafd7876a71be84b8610.diff

LOG: [X86] Add missing immediate qualifier to the (V)INSERT/EXTRACT/PERM2 instruction names (#108593)

This makes it easier to algorithmically recreate the instruction name in various analysis scripts I'm working on.
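
For reference, the trailing qualifiers in these names encode the operand kinds in order (r = register, m = memory, i = immediate), so VPERM2F128rri takes two registers plus an immediate, while VPERM2F128rmi folds a load. Below is a minimal sketch of the kind of name reconstruction the consistent suffix enables; the helper and the operand-kind mapping are illustrative only and not part of this patch or of LLVM:

# Hypothetical analysis-script helper: rebuild an X86 opcode name from a
# base mnemonic and its ordered operand kinds, relying on the consistent
# r/m/i suffix this patch completes.
SUFFIX = {"reg": "r", "mem": "m", "imm": "i"}

def opcode_name(base, operand_kinds):
    # e.g. opcode_name("VINSERTF128", ["reg", "reg", "imm"]) -> "VINSERTF128rri"
    return base + "".join(SUFFIX[k] for k in operand_kinds)

assert opcode_name("VINSERTF128", ["reg", "reg", "imm"]) == "VINSERTF128rri"
assert opcode_name("VEXTRACTF128", ["mem", "reg", "imm"]) == "VEXTRACTF128mri"
assert opcode_name("VPERM2I128", ["reg", "mem", "imm"]) == "VPERM2I128rmi"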

Added: 
    

Modified: 
    llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
    llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
    llvm/lib/Target/X86/X86CompressEVEX.cpp
    llvm/lib/Target/X86/X86InstrAVX512.td
    llvm/lib/Target/X86/X86InstrInfo.cpp
    llvm/lib/Target/X86/X86InstrSSE.td
    llvm/lib/Target/X86/X86ReplaceableInstrs.def
    llvm/lib/Target/X86/X86SchedAlderlakeP.td
    llvm/lib/Target/X86/X86SchedBroadwell.td
    llvm/lib/Target/X86/X86SchedHaswell.td
    llvm/lib/Target/X86/X86SchedSandyBridge.td
    llvm/lib/Target/X86/X86SchedSapphireRapids.td
    llvm/lib/Target/X86/X86SchedSkylakeClient.td
    llvm/lib/Target/X86/X86SchedSkylakeServer.td
    llvm/lib/Target/X86/X86ScheduleBdVer2.td
    llvm/lib/Target/X86/X86ScheduleBtVer2.td
    llvm/lib/Target/X86/X86ScheduleZnver1.td
    llvm/lib/Target/X86/X86ScheduleZnver2.td
    llvm/lib/Target/X86/X86ScheduleZnver3.td
    llvm/lib/Target/X86/X86ScheduleZnver4.td
    llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
    llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
    llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
    llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
    llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
    llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
    llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
    llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
    llvm/test/CodeGen/X86/evex-to-vex-compress.mir
    llvm/test/CodeGen/X86/opt_phis2.mir
    llvm/test/TableGen/x86-fold-tables.inc
    llvm/utils/TableGen/X86ManualFoldTables.def
    llvm/utils/TableGen/X86ManualInstrMapping.def

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
index 2fb499122fbbfb..d2ee0f1bac6831 100644
--- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
@@ -1255,16 +1255,16 @@ bool X86InstructionSelector::selectExtract(MachineInstr &I,
 
   if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
     if (HasVLX)
-      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
+      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rri));
     else if (HasAVX)
-      I.setDesc(TII.get(X86::VEXTRACTF128rr));
+      I.setDesc(TII.get(X86::VEXTRACTF128rri));
     else
       return false;
   } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
     if (DstTy.getSizeInBits() == 128)
-      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
+      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrri));
     else if (DstTy.getSizeInBits() == 256)
-      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
+      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrri));
     else
       return false;
   } else
@@ -1388,16 +1388,16 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I,
 
   if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
     if (HasVLX)
-      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
+      I.setDesc(TII.get(X86::VINSERTF32x4Z256rri));
     else if (HasAVX)
-      I.setDesc(TII.get(X86::VINSERTF128rr));
+      I.setDesc(TII.get(X86::VINSERTF128rri));
     else
       return false;
   } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
     if (InsertRegTy.getSizeInBits() == 128)
-      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
+      I.setDesc(TII.get(X86::VINSERTF32x4Zrri));
     else if (InsertRegTy.getSizeInBits() == 256)
-      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
+      I.setDesc(TII.get(X86::VINSERTF64x4Zrri));
     else
       return false;
   } else

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
index 84680b06bcc28f..95038ccf63b8b8 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
@@ -1158,13 +1158,13 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
     DestName = getRegName(MI->getOperand(0).getReg());
     break;
 
-  case X86::VPERM2F128rr:
-  case X86::VPERM2I128rr:
+  case X86::VPERM2F128rri:
+  case X86::VPERM2I128rri:
     Src2Name = getRegName(MI->getOperand(2).getReg());
     [[fallthrough]];
 
-  case X86::VPERM2F128rm:
-  case X86::VPERM2I128rm:
+  case X86::VPERM2F128rmi:
+  case X86::VPERM2I128rmi:
     // For instruction comments purpose, assume the 256-bit vector is v4i64.
     if (MI->getOperand(NumOperands - 1).isImm())
       DecodeVPERM2X128Mask(4, MI->getOperand(NumOperands - 1).getImm(),

diff --git a/llvm/lib/Target/X86/X86CompressEVEX.cpp b/llvm/lib/Target/X86/X86CompressEVEX.cpp
index 7343af1bdc9a5a..a909440f983173 100644
--- a/llvm/lib/Target/X86/X86CompressEVEX.cpp
+++ b/llvm/lib/Target/X86/X86CompressEVEX.cpp
@@ -138,8 +138,8 @@ static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) {
   case X86::VSHUFI32X4Z256rri:
   case X86::VSHUFI64X2Z256rmi:
   case X86::VSHUFI64X2Z256rri: {
-    assert((NewOpc == X86::VPERM2F128rr || NewOpc == X86::VPERM2I128rr ||
-            NewOpc == X86::VPERM2F128rm || NewOpc == X86::VPERM2I128rm) &&
+    assert((NewOpc == X86::VPERM2F128rri || NewOpc == X86::VPERM2I128rri ||
+            NewOpc == X86::VPERM2F128rmi || NewOpc == X86::VPERM2I128rmi) &&
            "Unexpected new opcode!");
     MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands() - 1);
     int64_t ImmVal = Imm.getImm();

diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 684d41d013d19e..a86ec240b91935 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -368,7 +368,7 @@ multiclass vinsert_for_size_split<int Opcode, X86VectorVTInfo From,
                                   SDPatternOperator vinsert_for_mask,
                                   X86FoldableSchedWrite sched> {
   let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
-    defm rr : AVX512_maskable_split<Opcode, MRMSrcReg, To, (outs To.RC:$dst),
+    defm rri : AVX512_maskable_split<Opcode, MRMSrcReg, To, (outs To.RC:$dst),
                    (ins To.RC:$src1, From.RC:$src2, u8imm:$src3),
                    "vinsert" # From.EltTypeName # "x" # From.NumElts,
                    "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -380,7 +380,7 @@ multiclass vinsert_for_size_split<int Opcode, X86VectorVTInfo From,
                                            (iPTR imm))>,
                    AVX512AIi8Base, EVEX, VVVV, Sched<[sched]>;
     let mayLoad = 1 in
-    defm rm : AVX512_maskable_split<Opcode, MRMSrcMem, To, (outs To.RC:$dst),
+    defm rmi : AVX512_maskable_split<Opcode, MRMSrcMem, To, (outs To.RC:$dst),
                    (ins To.RC:$src1, From.MemOp:$src2, u8imm:$src3),
                    "vinsert" # From.EltTypeName # "x" # From.NumElts,
                    "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -408,7 +408,7 @@ multiclass vinsert_for_size_lowering<string InstrStr, X86VectorVTInfo From,
   let Predicates = p in {
     def : Pat<(vinsert_insert:$ins
                      (To.VT To.RC:$src1), (From.VT From.RC:$src2), (iPTR imm)),
-              (To.VT (!cast<Instruction>(InstrStr#"rr")
+              (To.VT (!cast<Instruction>(InstrStr#"rri")
                      To.RC:$src1, From.RC:$src2,
                      (INSERT_get_vinsert_imm To.RC:$ins)))>;
 
@@ -416,7 +416,7 @@ multiclass vinsert_for_size_lowering<string InstrStr, X86VectorVTInfo From,
                   (To.VT To.RC:$src1),
                   (From.VT (From.LdFrag addr:$src2)),
                   (iPTR imm)),
-              (To.VT (!cast<Instruction>(InstrStr#"rm")
+              (To.VT (!cast<Instruction>(InstrStr#"rmi")
                   To.RC:$src1, addr:$src2,
                   (INSERT_get_vinsert_imm To.RC:$ins)))>;
   }
@@ -529,7 +529,7 @@ let Predicates = p in {
                                                  (From.VT From.RC:$src2),
                                                  (iPTR imm))),
                            Cast.RC:$src0)),
-            (!cast<Instruction>(InstrStr#"rrk")
+            (!cast<Instruction>(InstrStr#"rrik")
              Cast.RC:$src0, Cast.KRCWM:$mask, To.RC:$src1, From.RC:$src2,
              (INSERT_get_vinsert_imm To.RC:$ins))>;
   def : Pat<(Cast.VT
@@ -541,7 +541,7 @@ let Predicates = p in {
                                                    (From.LdFrag addr:$src2))),
                                                  (iPTR imm))),
                            Cast.RC:$src0)),
-            (!cast<Instruction>(InstrStr#"rmk")
+            (!cast<Instruction>(InstrStr#"rmik")
              Cast.RC:$src0, Cast.KRCWM:$mask, To.RC:$src1, addr:$src2,
              (INSERT_get_vinsert_imm To.RC:$ins))>;
 
@@ -552,7 +552,7 @@ let Predicates = p in {
                                                  (From.VT From.RC:$src2),
                                                  (iPTR imm))),
                            Cast.ImmAllZerosV)),
-            (!cast<Instruction>(InstrStr#"rrkz")
+            (!cast<Instruction>(InstrStr#"rrikz")
              Cast.KRCWM:$mask, To.RC:$src1, From.RC:$src2,
              (INSERT_get_vinsert_imm To.RC:$ins))>;
   def : Pat<(Cast.VT
@@ -562,7 +562,7 @@ let Predicates = p in {
                                                  (From.VT (From.LdFrag addr:$src2)),
                                                  (iPTR imm))),
                            Cast.ImmAllZerosV)),
-            (!cast<Instruction>(InstrStr#"rmkz")
+            (!cast<Instruction>(InstrStr#"rmikz")
              Cast.KRCWM:$mask, To.RC:$src1, addr:$src2,
              (INSERT_get_vinsert_imm To.RC:$ins))>;
 }
@@ -677,7 +677,7 @@ multiclass vextract_for_size_split<int Opcode,
                                    SchedWrite SchedRR, SchedWrite SchedMR> {
 
   let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
-    defm rr : AVX512_maskable_split<Opcode, MRMDestReg, To, (outs To.RC:$dst),
+    defm rri : AVX512_maskable_split<Opcode, MRMDestReg, To, (outs To.RC:$dst),
                 (ins From.RC:$src1, u8imm:$idx),
                 "vextract" # To.EltTypeName # "x" # To.NumElts,
                 "$idx, $src1", "$src1, $idx",
@@ -685,7 +685,7 @@ multiclass vextract_for_size_split<int Opcode,
                 (vextract_for_mask:$idx (From.VT From.RC:$src1), (iPTR imm))>,
                 AVX512AIi8Base, EVEX, Sched<[SchedRR]>;
 
-    def mr  : AVX512AIi8<Opcode, MRMDestMem, (outs),
+    def mri  : AVX512AIi8<Opcode, MRMDestMem, (outs),
                     (ins To.MemOp:$dst, From.RC:$src1, u8imm:$idx),
                     "vextract" # To.EltTypeName # "x" # To.NumElts #
                         "\t{$idx, $src1, $dst|$dst, $src1, $idx}",
@@ -695,7 +695,7 @@ multiclass vextract_for_size_split<int Opcode,
                     Sched<[SchedMR]>;
 
     let mayStore = 1, hasSideEffects = 0 in
-    def mrk : AVX512AIi8<Opcode, MRMDestMem, (outs),
+    def mrik : AVX512AIi8<Opcode, MRMDestMem, (outs),
                     (ins To.MemOp:$dst, To.KRCWM:$mask,
                                         From.RC:$src1, u8imm:$idx),
                      "vextract" # To.EltTypeName # "x" # To.NumElts #
@@ -718,12 +718,12 @@ multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From,
                 SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> {
   let Predicates = p in {
      def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)),
-               (To.VT (!cast<Instruction>(InstrStr#"rr")
+               (To.VT (!cast<Instruction>(InstrStr#"rri")
                           From.RC:$src1,
                           (EXTRACT_get_vextract_imm To.RC:$ext)))>;
      def : Pat<(store (To.VT (vextract_extract:$ext (From.VT From.RC:$src1),
                               (iPTR imm))), addr:$dst),
-               (!cast<Instruction>(InstrStr#"mr") addr:$dst, From.RC:$src1,
+               (!cast<Instruction>(InstrStr#"mri") addr:$dst, From.RC:$src1,
                 (EXTRACT_get_vextract_imm To.RC:$ext))>;
   }
 }
@@ -828,31 +828,31 @@ defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v32bf16_info, v16bf16x_info,
 // smaller extract to enable EVEX->VEX.
 let Predicates = [NoVLX, HasEVEX512] in {
 def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 2))),
-          (v2i64 (VEXTRACTI128rr
+          (v2i64 (VEXTRACTI128rri
                   (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 2))),
-          (v2f64 (VEXTRACTF128rr
+          (v2f64 (VEXTRACTF128rri
                   (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 4))),
-          (v4i32 (VEXTRACTI128rr
+          (v4i32 (VEXTRACTI128rri
                   (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 4))),
-          (v4f32 (VEXTRACTF128rr
+          (v4f32 (VEXTRACTF128rri
                   (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 8))),
-          (v8i16 (VEXTRACTI128rr
+          (v8i16 (VEXTRACTI128rri
                   (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v8f16 (extract_subvector (v32f16 VR512:$src), (iPTR 8))),
-          (v8f16 (VEXTRACTF128rr
+          (v8f16 (VEXTRACTF128rri
                   (v16f16 (EXTRACT_SUBREG (v32f16 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 16))),
-          (v16i8 (VEXTRACTI128rr
+          (v16i8 (VEXTRACTI128rri
                   (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 }
@@ -861,31 +861,31 @@ def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 16))),
 // smaller extract to enable EVEX->VEX.
 let Predicates = [HasVLX] in {
 def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 2))),
-          (v2i64 (VEXTRACTI32x4Z256rr
+          (v2i64 (VEXTRACTI32x4Z256rri
                   (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 2))),
-          (v2f64 (VEXTRACTF32x4Z256rr
+          (v2f64 (VEXTRACTF32x4Z256rri
                   (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 4))),
-          (v4i32 (VEXTRACTI32x4Z256rr
+          (v4i32 (VEXTRACTI32x4Z256rri
                   (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 4))),
-          (v4f32 (VEXTRACTF32x4Z256rr
+          (v4f32 (VEXTRACTF32x4Z256rri
                   (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 8))),
-          (v8i16 (VEXTRACTI32x4Z256rr
+          (v8i16 (VEXTRACTI32x4Z256rri
                   (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v8f16 (extract_subvector (v32f16 VR512:$src), (iPTR 8))),
-          (v8f16 (VEXTRACTF32x4Z256rr
+          (v8f16 (VEXTRACTF32x4Z256rri
                   (v16f16 (EXTRACT_SUBREG (v32f16 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 16))),
-          (v16i8 (VEXTRACTI32x4Z256rr
+          (v16i8 (VEXTRACTI32x4Z256rri
                   (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm)),
                   (iPTR 1)))>;
 }
@@ -904,7 +904,7 @@ let Predicates = p in {
                                     (To.VT (vextract_extract:$ext
                                             (From.VT From.RC:$src), (iPTR imm)))),
                                    To.RC:$src0)),
-            (Cast.VT (!cast<Instruction>(InstrStr#"rrk")
+            (Cast.VT (!cast<Instruction>(InstrStr#"rrik")
                       Cast.RC:$src0, Cast.KRCWM:$mask, From.RC:$src,
                       (EXTRACT_get_vextract_imm To.RC:$ext)))>;
 
@@ -913,7 +913,7 @@ let Predicates = p in {
                                     (To.VT (vextract_extract:$ext
                                             (From.VT From.RC:$src), (iPTR imm)))),
                                    Cast.ImmAllZerosV)),
-            (Cast.VT (!cast<Instruction>(InstrStr#"rrkz")
+            (Cast.VT (!cast<Instruction>(InstrStr#"rrikz")
                       Cast.KRCWM:$mask, From.RC:$src,
                       (EXTRACT_get_vextract_imm To.RC:$ext)))>;
 }

diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 378eb00443ab6e..a74da000af0cee 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2597,8 +2597,8 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
         .setImm(X86::getSwappedVCMPImm(
             MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f));
     break;
-  case X86::VPERM2F128rr:
-  case X86::VPERM2I128rr:
+  case X86::VPERM2F128rri:
+  case X86::VPERM2I128rri:
     // Flip permute source immediate.
     // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
     // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
@@ -6258,16 +6258,16 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
                            get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
   case X86::VMOVAPSZ128mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
-                            get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
+                            get(X86::VEXTRACTF32x4Zmri), X86::sub_xmm);
   case X86::VMOVUPSZ128mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
-                            get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
+                            get(X86::VEXTRACTF32x4Zmri), X86::sub_xmm);
   case X86::VMOVAPSZ256mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
-                            get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
+                            get(X86::VEXTRACTF64x4Zmri), X86::sub_ymm);
   case X86::VMOVUPSZ256mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
-                            get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
+                            get(X86::VEXTRACTF64x4Zmri), X86::sub_ymm);
   case X86::MOV32ri64: {
     Register Reg = MIB.getReg(0);
     Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
@@ -6775,8 +6775,8 @@ static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
   case X86::VPACKUSWBZ128rr:
   case X86::VPACKSSDWZ128rr:
   case X86::VPACKUSDWZ128rr:
-  case X86::VPERM2F128rr:
-  case X86::VPERM2I128rr:
+  case X86::VPERM2F128rri:
+  case X86::VPERM2I128rri:
   case X86::VSHUFF32X4Z256rri:
   case X86::VSHUFF32X4Zrri:
   case X86::VSHUFF64X2Z256rri:

diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 6fc7876e1d7d28..18d7e54ac54612 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -7164,11 +7164,11 @@ let Predicates = [HasAVXNECONVERT, NoVLX] in
 
 let ExeDomain = SSEPackedSingle in {
 let isCommutable = 1 in
-def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
+def VPERM2F128rri : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, u8imm:$src3),
           "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
           VEX, VVVV, VEX_L, Sched<[WriteFShuffle256]>;
-def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
+def VPERM2F128rmi : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
           "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
           VEX, VVVV, VEX_L, Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>;
@@ -7181,12 +7181,12 @@ def Perm2XCommuteImm : SDNodeXForm<timm, [{
 
 multiclass vperm2x128_lowering<string InstrStr, ValueType VT, PatFrag memop_frag> {
   def : Pat<(VT (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 timm:$imm))),
-            (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR256:$src2, timm:$imm)>;
+            (!cast<Instruction>(InstrStr#rri) VR256:$src1, VR256:$src2, timm:$imm)>;
   def : Pat<(VT (X86VPerm2x128 VR256:$src1, (memop_frag addr:$src2), (i8 timm:$imm))),
-            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2, timm:$imm)>;
+            (!cast<Instruction>(InstrStr#rmi) VR256:$src1, addr:$src2, timm:$imm)>;
   // Pattern with load in other operand.
   def : Pat<(VT (X86VPerm2x128 (memop_frag addr:$src2), VR256:$src1, (i8 timm:$imm))),
-            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
+            (!cast<Instruction>(InstrStr#rmi) VR256:$src1, addr:$src2,
                                              (Perm2XCommuteImm timm:$imm))>;
 }
 
@@ -7207,12 +7207,12 @@ let Predicates = [HasAVX1Only] in {
 // VINSERTF128 - Insert packed floating-point values
 //
 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
-def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
+def VINSERTF128rri : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR128:$src2, u8imm:$src3),
           "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>, Sched<[WriteFShuffle256]>, VEX, VVVV, VEX_L;
 let mayLoad = 1 in
-def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
+def VINSERTF128rmi : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
           "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>, Sched<[WriteFShuffle256.Folded, WriteFShuffle256.ReadAfterFold]>, VEX, VVVV, VEX_L;
@@ -7230,18 +7230,18 @@ multiclass vinsert_lowering<string InstrStr, string PermStr,
                             PatFrag frommemop_frag, PatFrag tomemop_frag> {
   def : Pat<(vinsert128_insert:$ins (To VR256:$src1), (From VR128:$src2),
                                    (iPTR imm)),
-            (!cast<Instruction>(InstrStr#rr) VR256:$src1, VR128:$src2,
+            (!cast<Instruction>(InstrStr#rri) VR256:$src1, VR128:$src2,
                                        (INSERT_get_vinsert128_imm VR256:$ins))>;
   def : Pat<(vinsert128_insert:$ins (To VR256:$src1),
                                     (From (frommemop_frag addr:$src2)),
                                     (iPTR imm)),
-            (!cast<Instruction>(InstrStr#rm) VR256:$src1, addr:$src2,
+            (!cast<Instruction>(InstrStr#rmi) VR256:$src1, addr:$src2,
                                        (INSERT_get_vinsert128_imm VR256:$ins))>;
   // Folding "To" vector - convert to perm2x128 and commute inputs.
   def : Pat<(vinsert128_insert:$ins (To (tomemop_frag addr:$src1)),
                                     (From VR128:$src2),
                                     (iPTR imm)),
-            (!cast<Instruction>(PermStr#rm)
+            (!cast<Instruction>(PermStr#rmi)
               (INSERT_SUBREG (To (IMPLICIT_DEF)), VR128:$src2, sub_xmm),
               addr:$src1, (INSERT_get_vperm2x128_commutedimm VR256:$ins))>;
 }
@@ -7264,12 +7264,12 @@ let Predicates = [HasAVX1Only] in {
 // VEXTRACTF128 - Extract packed floating-point values
 //
 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
-def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
+def VEXTRACTF128rri : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
           (ins VR256:$src1, u8imm:$src2),
           "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
           []>, Sched<[WriteFShuffle256]>, VEX, VEX_L;
 let mayStore = 1 in
-def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
+def VEXTRACTF128mri : AVXAIi8<0x19, MRMDestMem, (outs),
           (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
           "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
           []>, Sched<[WriteFStoreX]>, VEX, VEX_L;
@@ -7277,12 +7277,12 @@ def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
 
 multiclass vextract_lowering<string InstrStr, ValueType From, ValueType To> {
   def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
-            (To (!cast<Instruction>(InstrStr#rr)
+            (To (!cast<Instruction>(InstrStr#rri)
                                     (From VR256:$src1),
                                     (EXTRACT_get_vextract128_imm VR128:$ext)))>;
   def : Pat<(store (To (vextract128_extract:$ext (From VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
-            (!cast<Instruction>(InstrStr#mr) addr:$dst, VR256:$src1,
+            (!cast<Instruction>(InstrStr#mri) addr:$dst, VR256:$src1,
              (EXTRACT_get_vextract128_imm VR128:$ext))>;
 }
 
@@ -7766,30 +7766,30 @@ let Predicates = [HasAVX1Only] in {
   def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
             (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)>;
   def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
               (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), sub_xmm),
               (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), 1)>;
   def : Pat<(v8f32 (X86VBroadcast v4f32:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
               (v4f32 (VPERMILPSri VR128:$src, 0)), sub_xmm),
               (v4f32 (VPERMILPSri VR128:$src, 0)), 1)>;
   def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
               (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), sub_xmm),
               (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), 1)>;
   def : Pat<(v4f64 (X86VBroadcast v2f64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
               (v2f64 (VMOVDDUPrr VR128:$src)), sub_xmm),
               (v2f64 (VMOVDDUPrr VR128:$src)), 1)>;
 
   def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
             (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)>;
   def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
               (v4i32 (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)), sub_xmm),
               (v4i32 (VPSHUFDri (VMOVDI2PDIrr GR32:$src), 0)), 1)>;
   def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
               (v4i32 (VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)), sub_xmm),
               (v4i32 (VPSHUFDri (VMOV64toPQIrr GR64:$src), 0x44)), 1)>;
 
@@ -7799,7 +7799,7 @@ let Predicates = [HasAVX1Only] in {
             (VMOVDDUPrm addr:$src)>;
 
   def : Pat<(v4i64 (X86VBroadcast v2i64:$src)),
-            (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
+            (VINSERTF128rri (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
               (v2i64 (VPSHUFDri VR128:$src, 0x44)), sub_xmm),
               (v2i64 (VPSHUFDri VR128:$src, 0x44)), 1)>;
 }
@@ -7866,11 +7866,11 @@ defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
 // VPERM2I128 - Permute Integer vector Values in 128-bit chunks
 //
 let isCommutable = 1 in
-def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
+def VPERM2I128rri : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR256:$src2, u8imm:$src3),
           "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
           Sched<[WriteShuffle256]>, VEX, VVVV, VEX_L;
-def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
+def VPERM2I128rmi : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
           "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", []>,
           Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX, VVVV, VEX_L;
@@ -7888,12 +7888,12 @@ let Predicates = [HasAVX2] in {
 // VINSERTI128 - Insert packed integer values
 //
 let hasSideEffects = 0 in {
-def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
+def VINSERTI128rri : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
           (ins VR256:$src1, VR128:$src2, u8imm:$src3),
           "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>, Sched<[WriteShuffle256]>, VEX, VVVV, VEX_L;
 let mayLoad = 1 in
-def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
+def VINSERTI128rmi : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
           (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
           "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>, Sched<[WriteShuffle256.Folded, WriteShuffle256.ReadAfterFold]>, VEX, VVVV, VEX_L;
@@ -7914,12 +7914,12 @@ let Predicates = [HasAVXNECONVERT, NoVLX] in
 //===----------------------------------------------------------------------===//
 // VEXTRACTI128 - Extract packed integer values
 //
-def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
+def VEXTRACTI128rri : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
           (ins VR256:$src1, u8imm:$src2),
           "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
           Sched<[WriteShuffle256]>, VEX, VEX_L;
 let hasSideEffects = 0, mayStore = 1 in
-def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
+def VEXTRACTI128mri : AVX2AIi8<0x39, MRMDestMem, (outs),
           (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
           "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
           Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, VEX_L;

diff --git a/llvm/lib/Target/X86/X86ReplaceableInstrs.def b/llvm/lib/Target/X86/X86ReplaceableInstrs.def
index e1383198d3fe96..413c2677041dfb 100644
--- a/llvm/lib/Target/X86/X86ReplaceableInstrs.def
+++ b/llvm/lib/Target/X86/X86ReplaceableInstrs.def
@@ -110,30 +110,30 @@ ENTRY(VBROADCASTSDZ256rr, VBROADCASTSDZ256rr, VPBROADCASTQZ256rr)
 ENTRY(VBROADCASTSDZ256rm, VBROADCASTSDZ256rm, VPBROADCASTQZ256rm)
 ENTRY(VBROADCASTSDZrr, VBROADCASTSDZrr, VPBROADCASTQZrr)
 ENTRY(VBROADCASTSDZrm, VBROADCASTSDZrm, VPBROADCASTQZrm)
-ENTRY(VINSERTF32x4Zrr, VINSERTF32x4Zrr, VINSERTI32x4Zrr)
-ENTRY(VINSERTF32x4Zrm, VINSERTF32x4Zrm, VINSERTI32x4Zrm)
-ENTRY(VINSERTF32x8Zrr, VINSERTF32x8Zrr, VINSERTI32x8Zrr)
-ENTRY(VINSERTF32x8Zrm, VINSERTF32x8Zrm, VINSERTI32x8Zrm)
-ENTRY(VINSERTF64x2Zrr, VINSERTF64x2Zrr, VINSERTI64x2Zrr)
-ENTRY(VINSERTF64x2Zrm, VINSERTF64x2Zrm, VINSERTI64x2Zrm)
-ENTRY(VINSERTF64x4Zrr, VINSERTF64x4Zrr, VINSERTI64x4Zrr)
-ENTRY(VINSERTF64x4Zrm, VINSERTF64x4Zrm, VINSERTI64x4Zrm)
-ENTRY(VINSERTF32x4Z256rr, VINSERTF32x4Z256rr, VINSERTI32x4Z256rr)
-ENTRY(VINSERTF32x4Z256rm, VINSERTF32x4Z256rm, VINSERTI32x4Z256rm)
-ENTRY(VINSERTF64x2Z256rr, VINSERTF64x2Z256rr, VINSERTI64x2Z256rr)
-ENTRY(VINSERTF64x2Z256rm, VINSERTF64x2Z256rm, VINSERTI64x2Z256rm)
-ENTRY(VEXTRACTF32x4Zrr, VEXTRACTF32x4Zrr, VEXTRACTI32x4Zrr)
-ENTRY(VEXTRACTF32x4Zmr, VEXTRACTF32x4Zmr, VEXTRACTI32x4Zmr)
-ENTRY(VEXTRACTF32x8Zrr, VEXTRACTF32x8Zrr, VEXTRACTI32x8Zrr)
-ENTRY(VEXTRACTF32x8Zmr, VEXTRACTF32x8Zmr, VEXTRACTI32x8Zmr)
-ENTRY(VEXTRACTF64x2Zrr, VEXTRACTF64x2Zrr, VEXTRACTI64x2Zrr)
-ENTRY(VEXTRACTF64x2Zmr, VEXTRACTF64x2Zmr, VEXTRACTI64x2Zmr)
-ENTRY(VEXTRACTF64x4Zrr, VEXTRACTF64x4Zrr, VEXTRACTI64x4Zrr)
-ENTRY(VEXTRACTF64x4Zmr, VEXTRACTF64x4Zmr, VEXTRACTI64x4Zmr)
-ENTRY(VEXTRACTF32x4Z256rr, VEXTRACTF32x4Z256rr, VEXTRACTI32x4Z256rr)
-ENTRY(VEXTRACTF32x4Z256mr, VEXTRACTF32x4Z256mr, VEXTRACTI32x4Z256mr)
-ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF64x2Z256rr, VEXTRACTI64x2Z256rr)
-ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF64x2Z256mr, VEXTRACTI64x2Z256mr)
+ENTRY(VINSERTF32x4Zrri, VINSERTF32x4Zrri, VINSERTI32x4Zrri)
+ENTRY(VINSERTF32x4Zrmi, VINSERTF32x4Zrmi, VINSERTI32x4Zrmi)
+ENTRY(VINSERTF32x8Zrri, VINSERTF32x8Zrri, VINSERTI32x8Zrri)
+ENTRY(VINSERTF32x8Zrmi, VINSERTF32x8Zrmi, VINSERTI32x8Zrmi)
+ENTRY(VINSERTF64x2Zrri, VINSERTF64x2Zrri, VINSERTI64x2Zrri)
+ENTRY(VINSERTF64x2Zrmi, VINSERTF64x2Zrmi, VINSERTI64x2Zrmi)
+ENTRY(VINSERTF64x4Zrri, VINSERTF64x4Zrri, VINSERTI64x4Zrri)
+ENTRY(VINSERTF64x4Zrmi, VINSERTF64x4Zrmi, VINSERTI64x4Zrmi)
+ENTRY(VINSERTF32x4Z256rri, VINSERTF32x4Z256rri, VINSERTI32x4Z256rri)
+ENTRY(VINSERTF32x4Z256rmi, VINSERTF32x4Z256rmi, VINSERTI32x4Z256rmi)
+ENTRY(VINSERTF64x2Z256rri, VINSERTF64x2Z256rri, VINSERTI64x2Z256rri)
+ENTRY(VINSERTF64x2Z256rmi, VINSERTF64x2Z256rmi, VINSERTI64x2Z256rmi)
+ENTRY(VEXTRACTF32x4Zrri, VEXTRACTF32x4Zrri, VEXTRACTI32x4Zrri)
+ENTRY(VEXTRACTF32x4Zmri, VEXTRACTF32x4Zmri, VEXTRACTI32x4Zmri)
+ENTRY(VEXTRACTF32x8Zrri, VEXTRACTF32x8Zrri, VEXTRACTI32x8Zrri)
+ENTRY(VEXTRACTF32x8Zmri, VEXTRACTF32x8Zmri, VEXTRACTI32x8Zmri)
+ENTRY(VEXTRACTF64x2Zrri, VEXTRACTF64x2Zrri, VEXTRACTI64x2Zrri)
+ENTRY(VEXTRACTF64x2Zmri, VEXTRACTF64x2Zmri, VEXTRACTI64x2Zmri)
+ENTRY(VEXTRACTF64x4Zrri, VEXTRACTF64x4Zrri, VEXTRACTI64x4Zrri)
+ENTRY(VEXTRACTF64x4Zmri, VEXTRACTF64x4Zmri, VEXTRACTI64x4Zmri)
+ENTRY(VEXTRACTF32x4Z256rri, VEXTRACTF32x4Z256rri, VEXTRACTI32x4Z256rri)
+ENTRY(VEXTRACTF32x4Z256mri, VEXTRACTF32x4Z256mri, VEXTRACTI32x4Z256mri)
+ENTRY(VEXTRACTF64x2Z256rri, VEXTRACTF64x2Z256rri, VEXTRACTI64x2Z256rri)
+ENTRY(VEXTRACTF64x2Z256mri, VEXTRACTF64x2Z256mri, VEXTRACTI64x2Z256mri)
 ENTRY(VPERMILPSmi, VPERMILPSmi, VPSHUFDmi)
 ENTRY(VPERMILPSri, VPERMILPSri, VPSHUFDri)
 ENTRY(VPERMILPSZ128mi, VPERMILPSZ128mi, VPSHUFDZ128mi)
@@ -192,8 +192,8 @@ ENTRY(VORPSYrm, VORPDYrm, VPORYrm)
 ENTRY(VORPSYrr, VORPDYrr, VPORYrr)
 ENTRY(VXORPSYrm, VXORPDYrm, VPXORYrm)
 ENTRY(VXORPSYrr, VXORPDYrr, VPXORYrr)
-ENTRY(VPERM2F128rm, VPERM2F128rm, VPERM2I128rm)
-ENTRY(VPERM2F128rr, VPERM2F128rr, VPERM2I128rr)
+ENTRY(VPERM2F128rmi, VPERM2F128rmi, VPERM2I128rmi)
+ENTRY(VPERM2F128rri, VPERM2F128rri, VPERM2I128rri)
 ENTRY(VBROADCASTSSrm, VBROADCASTSSrm, VPBROADCASTDrm)
 ENTRY(VBROADCASTSSrr, VBROADCASTSSrr, VPBROADCASTDrr)
 ENTRY(VMOVDDUPrm, VMOVDDUPrm, VPBROADCASTQrm)
@@ -232,10 +232,10 @@ ENTRY(VMOVHPSZ128mr, VMOVHPDZ128mr, INSTRUCTION_LIST_END)
 
 static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
 // PackedSingle, PackedDouble, PackedInt
-ENTRY(VEXTRACTF128mr, VEXTRACTF128mr, VEXTRACTI128mr)
-ENTRY(VEXTRACTF128rr, VEXTRACTF128rr, VEXTRACTI128rr)
-ENTRY(VINSERTF128rm, VINSERTF128rm, VINSERTI128rm)
-ENTRY(VINSERTF128rr, VINSERTF128rr, VINSERTI128rr)
+ENTRY(VEXTRACTF128mri, VEXTRACTF128mri, VEXTRACTI128mri)
+ENTRY(VEXTRACTF128rri, VEXTRACTF128rri, VEXTRACTI128rri)
+ENTRY(VINSERTF128rmi, VINSERTF128rmi, VINSERTI128rmi)
+ENTRY(VINSERTF128rri, VINSERTF128rri, VINSERTI128rri)
 };
 
 // NOTE: These should only be used by the custom domain methods.

diff --git a/llvm/lib/Target/X86/X86SchedAlderlakeP.td b/llvm/lib/Target/X86/X86SchedAlderlakeP.td
index 7756cd57cf7f84..6917ff50d13f2c 100644
--- a/llvm/lib/Target/X86/X86SchedAlderlakeP.td
+++ b/llvm/lib/Target/X86/X86SchedAlderlakeP.td
@@ -2250,7 +2250,7 @@ def ADLPWriteResGroup255 : SchedWriteRes<[ADLPPort00_01_05, ADLPPort02_03_11]> {
   let Latency = 9;
   let NumMicroOps = 2;
 }
-def : InstRW<[ADLPWriteResGroup255, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rm$",
+def : InstRW<[ADLPWriteResGroup255, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rmi$",
                                                                  "^VP(ADD|SUB)(B|D|Q|W)Yrm$")>;
 
 def ADLPWriteResGroup256 : SchedWriteRes<[ADLPPort00, ADLPPort00_06, ADLPPort02_03_11]> {

diff --git a/llvm/lib/Target/X86/X86SchedBroadwell.td b/llvm/lib/Target/X86/X86SchedBroadwell.td
index 697d30a8a95487..699ca91cd1f8f4 100644
--- a/llvm/lib/Target/X86/X86SchedBroadwell.td
+++ b/llvm/lib/Target/X86/X86SchedBroadwell.td
@@ -986,8 +986,8 @@ def BWWriteResGroup65 : SchedWriteRes<[BWPort23,BWPort015]> {
   let NumMicroOps = 2;
   let ReleaseAtCycles = [1,1];
 }
-def: InstRW<[BWWriteResGroup65], (instrs VINSERTF128rm,
-                                         VINSERTI128rm,
+def: InstRW<[BWWriteResGroup65], (instrs VINSERTF128rmi,
+                                         VINSERTI128rmi,
                                          VPBLENDDrmi)>;
 
 def BWWriteResGroup66 : SchedWriteRes<[BWPort23,BWPort0156]> {

diff --git a/llvm/lib/Target/X86/X86SchedHaswell.td b/llvm/lib/Target/X86/X86SchedHaswell.td
index c4d2ad7681c432..b820418bb55191 100644
--- a/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -1028,8 +1028,8 @@ def HWWriteResGroup17 : SchedWriteRes<[HWPort23,HWPort015]> {
   let NumMicroOps = 2;
   let ReleaseAtCycles = [1,1];
 }
-def: InstRW<[HWWriteResGroup17], (instrs VINSERTF128rm,
-                                         VINSERTI128rm,
+def: InstRW<[HWWriteResGroup17], (instrs VINSERTF128rmi,
+                                         VINSERTI128rmi,
                                          VPBLENDDrmi)>;
 
 def HWWriteResGroup17_2 : SchedWriteRes<[HWPort23,HWPort015]> {

diff --git a/llvm/lib/Target/X86/X86SchedSandyBridge.td b/llvm/lib/Target/X86/X86SchedSandyBridge.td
index 6966400eff2c05..fc42d5e52451e0 100644
--- a/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -892,7 +892,7 @@ def SBWriteResGroup58 : SchedWriteRes<[SBPort23,SBPort05]> {
   let NumMicroOps = 2;
   let ReleaseAtCycles = [1,1];
 }
-def: InstRW<[SBWriteResGroup58], (instrs VINSERTF128rm)>;
+def: InstRW<[SBWriteResGroup58], (instrs VINSERTF128rmi)>;
 
 def SBWriteResGroup59 : SchedWriteRes<[SBPort23,SBPort15]> {
   let Latency = 7;

diff --git a/llvm/lib/Target/X86/X86SchedSapphireRapids.td b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
index ff3fe32be1851c..413d5bbaadd4df 100644
--- a/llvm/lib/Target/X86/X86SchedSapphireRapids.td
+++ b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
@@ -1661,8 +1661,8 @@ def : InstRW<[SPRWriteResGroup131], (instregex "^VBROADCAST(F|I)32X(8|2Z)rmk(z?)
                                                "^VMOVDQ(A|U)(32|64)Zrmk(z?)$",
                                                "^VPBROADCAST(D|Q)Zrmk(z?)$")>;
 def : InstRW<[SPRWriteResGroup131, ReadAfterVecLd], (instregex "^MMX_P(ADD|SUB)(B|D|Q|W)rm$")>;
-def : InstRW<[SPRWriteResGroup131, ReadAfterVecYLd], (instregex "^VINSERT(F|I)(32|64)x4Zrm((k|kz)?)$",
-                                                                "^VINSERT(F|I)(32x8|64x2)Zrm((k|kz)?)$",
+def : InstRW<[SPRWriteResGroup131, ReadAfterVecYLd], (instregex "^VINSERT(F|I)(32|64)x4Zrmi((k|kz)?)$",
+                                                                "^VINSERT(F|I)(32x8|64x2)Zrmi((k|kz)?)$",
                                                                 "^VP(ADD|SUB)(B|D|Q|W)Zrm$",
                                                                 "^VP(ADD|SUB)(D|Q)Zrm(b|k|kz)$",
                                                                 "^VP(ADD|SUB)(D|Q)Zrmbk(z?)$",
@@ -2704,8 +2704,8 @@ def : InstRW<[SPRWriteResGroup262], (instregex "^VBROADCAST(F|I)32X(2|4)Z256rmk(
                                                "^VMOV(D|SH|SL)DUPZ256rmk(z?)$",
                                                "^VMOVDQ(A|U)(32|64)Z256rmk(z?)$",
                                                "^VPBROADCAST(D|Q)Z256rmk(z?)$")>;
-def : InstRW<[SPRWriteResGroup262, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rm$",
-                                                                "^VINSERT(F|I)(32x4|64x2)Z256rm((k|kz)?)$",
+def : InstRW<[SPRWriteResGroup262, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rmi$",
+                                                                "^VINSERT(F|I)(32x4|64x2)Z256rmi((k|kz)?)$",
                                                                 "^VP(ADD|SUB)(B|D|Q|W)(Y|Z256)rm$",
                                                                 "^VP(ADD|SUB)(D|Q)Z256rm(b|k|kz)$",
                                                                 "^VP(ADD|SUB)(D|Q)Z256rmbk(z?)$",

diff --git a/llvm/lib/Target/X86/X86SchedSkylakeClient.td b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
index 14da1acdaafa5a..116aa3555a065c 100644
--- a/llvm/lib/Target/X86/X86SchedSkylakeClient.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
@@ -1095,8 +1095,8 @@ def SKLWriteResGroup91 : SchedWriteRes<[SKLPort23,SKLPort015]> {
   let NumMicroOps = 2;
   let ReleaseAtCycles = [1,1];
 }
-def: InstRW<[SKLWriteResGroup91], (instrs VINSERTF128rm,
-                                          VINSERTI128rm,
+def: InstRW<[SKLWriteResGroup91], (instrs VINSERTF128rmi,
+                                          VINSERTI128rmi,
                                           VPBLENDDrmi)>;
 def: InstRW<[SKLWriteResGroup91, ReadAfterVecXLd],
                                   (instregex "(V?)PADD(B|D|Q|W)rm",

diff --git a/llvm/lib/Target/X86/X86SchedSkylakeServer.td b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
index 0ecfc30e884758..649d38de185a80 100644
--- a/llvm/lib/Target/X86/X86SchedSkylakeServer.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
@@ -1343,7 +1343,7 @@ def: InstRW<[SKXWriteResGroup95, ReadAfterVecXLd],
                                              "VBLENDMPSZ128rm(b?)",
                                              "VBROADCASTI32X2Z128rm(b?)",
                                              "VBROADCASTSSZ128rm(b?)",
-                                             "VINSERT(F|I)128rm",
+                                             "VINSERT(F|I)128rmi",
                                              "VMOVAPDZ128rm(b?)",
                                              "VMOVAPSZ128rm(b?)",
                                              "VMOVDDUPZ128rm(b?)",

diff --git a/llvm/lib/Target/X86/X86ScheduleBdVer2.td b/llvm/lib/Target/X86/X86ScheduleBdVer2.td
index 296504cfc78513..e5cc56320b3ff8 100644
--- a/llvm/lib/Target/X86/X86ScheduleBdVer2.td
+++ b/llvm/lib/Target/X86/X86ScheduleBdVer2.td
@@ -954,28 +954,28 @@ def PdWriteVEXTRACTF128rr : SchedWriteRes<[PdFPU01, PdFPFMA]> {
   let Latency = 2;
   let ReleaseAtCycles = [1, 2];
 }
-def : InstRW<[PdWriteVEXTRACTF128rr], (instrs VEXTRACTF128rr)>;
+def : InstRW<[PdWriteVEXTRACTF128rr], (instrs VEXTRACTF128rri)>;
 
 def PdWriteVEXTRACTF128mr : SchedWriteRes<[PdFPU01, PdFPFMA]> {
   let Latency = 7;
   let ReleaseAtCycles = [1, 4];
   let NumMicroOps = 2;
 }
-def : InstRW<[PdWriteVEXTRACTF128mr], (instrs VEXTRACTF128mr)>;
+def : InstRW<[PdWriteVEXTRACTF128mr], (instrs VEXTRACTF128mri)>;
 
 def PdWriteVPERM2F128rr : SchedWriteRes<[PdFPU01, PdFPFMA]> {
   let Latency = 4;
   let ReleaseAtCycles = [1, 6];
   let NumMicroOps = 8;
 }
-def : InstRW<[PdWriteVPERM2F128rr], (instrs VPERM2F128rr)>;
+def : InstRW<[PdWriteVPERM2F128rr], (instrs VPERM2F128rri)>;
 
 def PdWriteVPERM2F128rm : SchedWriteRes<[PdFPU01, PdFPFMA]> {
   let Latency = 8; // 4 + 4
   let ReleaseAtCycles = [1, 8];
   let NumMicroOps = 10;
 }
-def : InstRW<[PdWriteVPERM2F128rm], (instrs VPERM2F128rm)>;
+def : InstRW<[PdWriteVPERM2F128rm], (instrs VPERM2F128rmi)>;
 
 ////////////////////////////////////////////////////////////////////////////////
 // Conversions.

diff --git a/llvm/lib/Target/X86/X86ScheduleBtVer2.td b/llvm/lib/Target/X86/X86ScheduleBtVer2.td
index 9cba933e82b054..d9378251dd29dc 100644
--- a/llvm/lib/Target/X86/X86ScheduleBtVer2.td
+++ b/llvm/lib/Target/X86/X86ScheduleBtVer2.td
@@ -807,7 +807,7 @@ def : InstRW<[JWriteINSERTQ], (instrs INSERTQ, INSERTQI)>;
 ////////////////////////////////////////////////////////////////////////////////
 
 def JWriteVecExtractF128: SchedWriteRes<[JFPU01, JFPX]>;
-def : InstRW<[JWriteVecExtractF128], (instrs VEXTRACTF128rr)>;
+def : InstRW<[JWriteVecExtractF128], (instrs VEXTRACTF128rri)>;
 
 def JWriteVBROADCASTYLd: SchedWriteRes<[JLAGU, JFPU01, JFPX]> {
   let Latency = 6;
@@ -930,7 +930,7 @@ def JWriteVPERM2F128 : SchedWriteVariant<[
   SchedVar<MCSchedPredicate<ZeroIdiomVPERMPredicate>, [JWriteZeroIdiomYmm]>,
   SchedVar<NoSchedPred,                               [WriteFShuffle256]>
 ]>;
-def : InstRW<[JWriteVPERM2F128], (instrs VPERM2F128rr)>;
+def : InstRW<[JWriteVPERM2F128], (instrs VPERM2F128rri)>;
 
 // This write is used for slow LEA instructions.
 def JWrite3OpsLEA : SchedWriteRes<[JALU1, JSAGU]> {
@@ -1008,7 +1008,7 @@ def : IsZeroIdiomFunction<[
     VXORPSYrr, VXORPDYrr, VANDNPSYrr, VANDNPDYrr
   ], ZeroIdiomPredicate>,
 
-  DepBreakingClass<[ VPERM2F128rr ], ZeroIdiomVPERMPredicate>
+  DepBreakingClass<[ VPERM2F128rri ], ZeroIdiomVPERMPredicate>
 ]>;
 
 def : IsDepBreakingFunction<[

diff --git a/llvm/lib/Target/X86/X86ScheduleZnver1.td b/llvm/lib/Target/X86/X86ScheduleZnver1.td
index a044ddc3001329..f66b7172a7ffc5 100644
--- a/llvm/lib/Target/X86/X86ScheduleZnver1.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver1.td
@@ -991,16 +991,16 @@ def ZnWriteVPERM2r : SchedWriteRes<[ZnFPU0, ZnFPU12]> {
   let Latency = 3;
   let ReleaseAtCycles = [3,3];
 }
-def : InstRW<[ZnWriteVPERM2r], (instrs VPERM2F128rr,
-                                       VPERM2I128rr)>;
+def : InstRW<[ZnWriteVPERM2r], (instrs VPERM2F128rri,
+                                       VPERM2I128rri)>;
 
 def ZnWriteVPERM2m : SchedWriteRes<[ZnAGU, ZnFPU0, ZnFPU12]> {
   let NumMicroOps = 12;
   let Latency = 8;
   let ReleaseAtCycles = [1,3,3];
 }
-def : InstRW<[ZnWriteVPERM2m], (instrs VPERM2F128rm,
-                                       VPERM2I128rm)>;
+def : InstRW<[ZnWriteVPERM2m], (instrs VPERM2F128rmi,
+                                       VPERM2I128rmi)>;
 
 def ZnWriteBROADCAST : SchedWriteRes<[ZnAGU, ZnFPU13]> {
   let NumMicroOps = 2;
@@ -1029,12 +1029,12 @@ def : InstRW<[ZnWriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmr")>;
 
 // VEXTRACTF128 / VEXTRACTI128.
 // x,y,i.
-def : InstRW<[ZnWriteFPU013], (instrs VEXTRACTF128rr,
-                                      VEXTRACTI128rr)>;
+def : InstRW<[ZnWriteFPU013], (instrs VEXTRACTF128rri,
+                                      VEXTRACTI128rri)>;
 
 // m128,y,i.
-def : InstRW<[ZnWriteFPU013m], (instrs VEXTRACTF128mr,
-                                       VEXTRACTI128mr)>;
+def : InstRW<[ZnWriteFPU013m], (instrs VEXTRACTF128mri,
+                                       VEXTRACTI128mri)>;
 
 def ZnWriteVINSERT128r: SchedWriteRes<[ZnFPU013]> {
   let Latency = 2;
@@ -1047,10 +1047,10 @@ def ZnWriteVINSERT128Ld: SchedWriteRes<[ZnAGU,ZnFPU013]> {
 }
 // VINSERTF128 / VINSERTI128.
 // y,y,x,i.
-def : InstRW<[ZnWriteVINSERT128r], (instrs VINSERTF128rr,
-                                           VINSERTI128rr)>;
-def : InstRW<[ZnWriteVINSERT128Ld], (instrs VINSERTF128rm,
-                                            VINSERTI128rm)>;
+def : InstRW<[ZnWriteVINSERT128r], (instrs VINSERTF128rri,
+                                           VINSERTI128rri)>;
+def : InstRW<[ZnWriteVINSERT128Ld], (instrs VINSERTF128rmi,
+                                            VINSERTI128rmi)>;
 
 // VGATHER.
 def : InstRW<[WriteMicrocoded], (instregex "VGATHER(Q|D)(PD|PS)(Y?)rm")>;

diff --git a/llvm/lib/Target/X86/X86ScheduleZnver2.td b/llvm/lib/Target/X86/X86ScheduleZnver2.td
index c3a0f2684a2989..14e18cb9dc7178 100644
--- a/llvm/lib/Target/X86/X86ScheduleZnver2.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver2.td
@@ -998,15 +998,15 @@ def Zn2WriteVPERM2r : SchedWriteRes<[Zn2FPU2]> {
   let NumMicroOps = 1;
   let Latency = 3;
 }
-def : InstRW<[Zn2WriteVPERM2r], (instrs VPERM2F128rr,
-                                        VPERM2I128rr)>;
+def : InstRW<[Zn2WriteVPERM2r], (instrs VPERM2F128rri,
+                                        VPERM2I128rri)>;
 
 def Zn2WriteVPERM2m : SchedWriteRes<[Zn2AGU, Zn2FPU2]> {
   let NumMicroOps = 1;
   let Latency = 8;
 }
-def : InstRW<[Zn2WriteVPERM2m], (instrs VPERM2F128rm,
-                                        VPERM2I128rm)>;
+def : InstRW<[Zn2WriteVPERM2m], (instrs VPERM2F128rmi,
+                                        VPERM2I128rmi)>;
 
 def Zn2WriteBROADCAST : SchedWriteRes<[Zn2AGU, Zn2FPU13]> {
   let NumMicroOps = 2;
@@ -1035,12 +1035,12 @@ def : InstRW<[Zn2WriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmr")>;
 
 // VEXTRACTF128 / VEXTRACTI128.
 // x,y,i.
-def : InstRW<[Zn2WriteFPU013], (instrs VEXTRACTF128rr,
-                                       VEXTRACTI128rr)>;
+def : InstRW<[Zn2WriteFPU013], (instrs VEXTRACTF128rri,
+                                       VEXTRACTI128rri)>;
 
 // m128,y,i.
-def : InstRW<[Zn2WriteFPU013m], (instrs VEXTRACTF128mr,
-                                        VEXTRACTI128mr)>;
+def : InstRW<[Zn2WriteFPU013m], (instrs VEXTRACTF128mri,
+                                        VEXTRACTI128mri)>;
 
 def Zn2WriteVINSERT128r: SchedWriteRes<[Zn2FPU013]> {
   let Latency = 2;
@@ -1052,10 +1052,10 @@ def Zn2WriteVINSERT128Ld: SchedWriteRes<[Zn2AGU,Zn2FPU013]> {
 }
 // VINSERTF128 / VINSERTI128.
 // y,y,x,i.
-def : InstRW<[Zn2WriteVINSERT128r], (instrs VINSERTF128rr,
-                                            VINSERTI128rr)>;
-def : InstRW<[Zn2WriteVINSERT128Ld], (instrs VINSERTF128rm,
-                                             VINSERTI128rm)>;
+def : InstRW<[Zn2WriteVINSERT128r], (instrs VINSERTF128rri,
+                                            VINSERTI128rri)>;
+def : InstRW<[Zn2WriteVINSERT128Ld], (instrs VINSERTF128rmi,
+                                             VINSERTI128rmi)>;
 
 // VGATHER.
 def : InstRW<[WriteMicrocoded], (instregex "VGATHER(Q|D)(PD|PS)(Y?)rm")>;

diff --git a/llvm/lib/Target/X86/X86ScheduleZnver3.td b/llvm/lib/Target/X86/X86ScheduleZnver3.td
index cbf1de8408798f..9e271c1ee37093 100644
--- a/llvm/lib/Target/X86/X86ScheduleZnver3.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver3.td
@@ -989,21 +989,21 @@ def Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr : SchedWriteRes<[Zn3FPFMisc0]> {
   let ReleaseAtCycles = [1];
   let NumMicroOps = 1;
 }
-def : InstRW<[Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rr, VEXTRACTI128rr)>;
+def : InstRW<[Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rri, VEXTRACTI128rri)>;
 
 def Zn3WriteVEXTRACTI128mr : SchedWriteRes<[Zn3FPFMisc0, Zn3FPSt, Zn3Store]> {
   let Latency = !add(Znver3Model.LoadLatency, Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency);
   let ReleaseAtCycles = [1, 1, 1];
   let NumMicroOps = !add(Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 1);
 }
-def : InstRW<[Zn3WriteVEXTRACTI128mr], (instrs VEXTRACTI128mr, VEXTRACTF128mr)>;
+def : InstRW<[Zn3WriteVEXTRACTI128mr], (instrs VEXTRACTI128mri, VEXTRACTF128mri)>;
 
 def Zn3WriteVINSERTF128rmr : SchedWriteRes<[Zn3AGU012, Zn3Load, Zn3FPFMisc0]> {
   let Latency = !add(Znver3Model.LoadLatency, Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency);
   let ReleaseAtCycles = [1, 1, 1];
   let NumMicroOps = !add(Zn3WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 0);
 }
-def : InstRW<[Zn3WriteVINSERTF128rmr], (instrs VINSERTF128rm)>;
+def : InstRW<[Zn3WriteVINSERTF128rmr], (instrs VINSERTF128rmi)>;
 
 defm : Zn3WriteResYMM<WriteVecStoreY, [Zn3FPSt, Zn3Store], Znver3Model.StoreLatency, [1, 1], 1>;
 defm : Zn3WriteResXMM<WriteVecStoreNT, [Zn3FPSt, Zn3Store], Znver3Model.StoreLatency, [1, 1], 1>;
@@ -1335,14 +1335,14 @@ def Zn3WriteVPERM2I128rr_VPERM2F128rr : SchedWriteRes<[Zn3FPVShuf]> {
   let ReleaseAtCycles = [1];
   let NumMicroOps = 1;
 }
-def : InstRW<[Zn3WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rr, VPERM2F128rr)>;
+def : InstRW<[Zn3WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rri, VPERM2F128rri)>;
 
 def Zn3WriteVPERM2F128rm : SchedWriteRes<[Zn3AGU012, Zn3Load, Zn3FPVShuf]> {
   let Latency = !add(Znver3Model.LoadLatency, Zn3WriteVPERM2I128rr_VPERM2F128rr.Latency);
   let ReleaseAtCycles = [1, 1, 1];
   let NumMicroOps = !add(Zn3WriteVPERM2I128rr_VPERM2F128rr.NumMicroOps, 0);
 }
-def : InstRW<[Zn3WriteVPERM2F128rm], (instrs VPERM2F128rm)>;
+def : InstRW<[Zn3WriteVPERM2F128rm], (instrs VPERM2F128rmi)>;
 
 def Zn3WriteVPERMPSYrm : SchedWriteRes<[Zn3AGU012, Zn3Load, Zn3FPVShuf]> {
   let Latency = !add(Znver3Model.LoadLatency, 7);

diff --git a/llvm/lib/Target/X86/X86ScheduleZnver4.td b/llvm/lib/Target/X86/X86ScheduleZnver4.td
index 6181ee841dd411..f82f9a88bb25e0 100644
--- a/llvm/lib/Target/X86/X86ScheduleZnver4.td
+++ b/llvm/lib/Target/X86/X86ScheduleZnver4.td
@@ -1001,21 +1001,21 @@ def Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr : SchedWriteRes<[Zn4FPFMisc0]> {
   let ReleaseAtCycles = [1];
   let NumMicroOps = 1;
 }
-def : InstRW<[Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rr, VEXTRACTI128rr)>;
+def : InstRW<[Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rri, VEXTRACTI128rri)>;
 
 def Zn4WriteVEXTRACTI128mr : SchedWriteRes<[Zn4FPFMisc0, Zn4FPSt, Zn4Store]> {
   let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency);
   let ReleaseAtCycles = [1, 1, 1];
   let NumMicroOps = !add(Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 1);
 }
-def : InstRW<[Zn4WriteVEXTRACTI128mr], (instrs VEXTRACTI128mr, VEXTRACTF128mr)>;
+def : InstRW<[Zn4WriteVEXTRACTI128mr], (instrs VEXTRACTI128mri, VEXTRACTF128mri)>;
 
 def Zn4WriteVINSERTF128rmr : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPFMisc0]> {
   let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency);
   let ReleaseAtCycles = [1, 1, 1];
   let NumMicroOps = !add(Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 0);
 }
-def : InstRW<[Zn4WriteVINSERTF128rmr], (instrs VINSERTF128rm)>;
+def : InstRW<[Zn4WriteVINSERTF128rmr], (instrs VINSERTF128rmi)>;
 
 defm : Zn4WriteResYMM<WriteVecStoreY, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
 defm : Zn4WriteResXMM<WriteVecStoreNT, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
@@ -1375,14 +1375,14 @@ def Zn4WriteVPERM2I128rr_VPERM2F128rr : SchedWriteRes<[Zn4FPVShuf]> {
   let ReleaseAtCycles = [1];
   let NumMicroOps = 1;
 }
-def : InstRW<[Zn4WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rr, VPERM2F128rr)>;
+def : InstRW<[Zn4WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rri, VPERM2F128rri)>;
 
 def Zn4WriteVPERM2F128rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> {
   let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVPERM2I128rr_VPERM2F128rr.Latency);
   let ReleaseAtCycles = [1, 1, 1];
   let NumMicroOps = !add(Zn4WriteVPERM2I128rr_VPERM2F128rr.NumMicroOps, 0);
 }
-def : InstRW<[Zn4WriteVPERM2F128rm], (instrs VPERM2F128rm)>;
+def : InstRW<[Zn4WriteVPERM2F128rm], (instrs VPERM2F128rmi)>;
 
 def Zn4WriteVPERMPSYrr : SchedWriteRes<[Zn4FPVShuf]> {
   let Latency = 7;

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
index 36a9244fe54288..73af03b34ec77c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
@@ -60,12 +60,12 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
 # AVX:               %0:vr256 = COPY $ymm1
-# AVX-NEXT:          %1:vr128 = VEXTRACTF128rr %0, 1
+# AVX-NEXT:          %1:vr128 = VEXTRACTF128rri %0, 1
 # AVX-NEXT:          $xmm0 = COPY %1
 # AVX-NEXT:          RET 0, implicit $xmm0
 #
 # AVX512VL:          %0:vr256x = COPY $ymm1
-# AVX512VL-NEXT:     %1:vr128x = VEXTRACTF32x4Z256rr %0, 1
+# AVX512VL-NEXT:     %1:vr128x = VEXTRACTF32x4Z256rri %0, 1
 # AVX512VL-NEXT:     $xmm0 = COPY %1
 # AVX512VL-NEXT:     RET 0, implicit $xmm0
 body:             |

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
index f0491b6e0d8028..5ddf58e6455576 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
@@ -59,7 +59,7 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
 # ALL:          %0:vr512 = COPY $zmm1
-# ALL-NEXT:     %1:vr128x = VEXTRACTF32x4Zrr %0, 1
+# ALL-NEXT:     %1:vr128x = VEXTRACTF32x4Zrri %0, 1
 # ALL-NEXT:     $xmm0 = COPY %1
 # ALL-NEXT:     RET 0, implicit $xmm0
 body:             |
@@ -111,7 +111,7 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
 # ALL:          %0:vr512 = COPY $zmm1
-# ALL-NEXT:     %1:vr256x = VEXTRACTF64x4Zrr %0, 1
+# ALL-NEXT:     %1:vr256x = VEXTRACTF64x4Zrri %0, 1
 # ALL-NEXT:     $ymm0 = COPY %1
 # ALL-NEXT:     RET 0, implicit $ymm0
 body:             |

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
index 9424e1d52b754b..f04917c747979f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
@@ -30,13 +30,13 @@ registers:
   - { id: 2, class: vecr }
 # AVX:               %0:vr256 = COPY $ymm0
 # AVX-NEXT:          %1:vr128 = COPY $xmm1
-# AVX-NEXT:          %2:vr256 = VINSERTF128rr %0, %1, 0
+# AVX-NEXT:          %2:vr256 = VINSERTF128rri %0, %1, 0
 # AVX-NEXT:          $ymm0 = COPY %2
 # AVX-NEXT:          RET 0, implicit $ymm0
 #
 # AVX512VL:          %0:vr256x = COPY $ymm0
 # AVX512VL-NEXT:     %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT:     %2:vr256x = VINSERTF32x4Z256rr %0, %1, 0
+# AVX512VL-NEXT:     %2:vr256x = VINSERTF32x4Z256rri %0, %1, 0
 # AVX512VL-NEXT:     $ymm0 = COPY %2
 # AVX512VL-NEXT:     RET 0, implicit $ymm0
 body:             |
@@ -92,13 +92,13 @@ registers:
   - { id: 2, class: vecr }
 # AVX:               %0:vr256 = COPY $ymm0
 # AVX-NEXT:          %1:vr128 = COPY $xmm1
-# AVX-NEXT:          %2:vr256 = VINSERTF128rr %0, %1, 1
+# AVX-NEXT:          %2:vr256 = VINSERTF128rri %0, %1, 1
 # AVX-NEXT:          $ymm0 = COPY %2
 # AVX-NEXT:          RET 0, implicit $ymm0
 #
 # AVX512VL:          %0:vr256x = COPY $ymm0
 # AVX512VL-NEXT:     %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT:     %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1
+# AVX512VL-NEXT:     %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1
 # AVX512VL-NEXT:     $ymm0 = COPY %2
 # AVX512VL-NEXT:     RET 0, implicit $ymm0
 body:             |
@@ -123,13 +123,13 @@ registers:
   - { id: 2, class: vecr }
 # AVX:               %0:vr256 = IMPLICIT_DEF
 # AVX-NEXT:          %1:vr128 = COPY $xmm1
-# AVX-NEXT:          %2:vr256 = VINSERTF128rr %0, %1, 1
+# AVX-NEXT:          %2:vr256 = VINSERTF128rri %0, %1, 1
 # AVX-NEXT:          $ymm0 = COPY %2
 # AVX-NEXT:          RET 0, implicit $ymm0
 #
 # AVX512VL:          %0:vr256x = IMPLICIT_DEF
 # AVX512VL-NEXT:     %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT:     %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1
+# AVX512VL-NEXT:     %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1
 # AVX512VL-NEXT:     $ymm0 = COPY %2
 # AVX512VL-NEXT:     RET 0, implicit $ymm0
 body:             |

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
index fefce0bc17cf55..10d98d7a3111be 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
@@ -51,8 +51,8 @@ body:             |
     ; ALL-LABEL: name: test_insert_128_idx0
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 0
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 0
+    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<4 x s32>) = COPY $xmm1
@@ -102,8 +102,8 @@ body:             |
     ; ALL-LABEL: name: test_insert_128_idx1
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<4 x s32>) = COPY $xmm1
@@ -127,8 +127,8 @@ body:             |
     ; ALL-LABEL: name: test_insert_128_idx1_undef
     ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
     ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[DEF]], [[COPY]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[DEF]], [[COPY]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>) = COPY $xmm1
@@ -152,8 +152,8 @@ body:             |
     ; ALL-LABEL: name: test_insert_256_idx0
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 0
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 0
+    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<8 x s32>) = COPY $ymm1
@@ -203,8 +203,8 @@ body:             |
     ; ALL-LABEL: name: test_insert_256_idx1
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<8 x s32>) = COPY $ymm1
@@ -228,8 +228,8 @@ body:             |
     ; ALL-LABEL: name: test_insert_256_idx1_undef
     ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
     ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[DEF]], [[COPY]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[DEF]], [[COPY]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = COPY $ymm1

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
index 8c04cc6f76c9aa..9d6494d628bf0c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
@@ -23,14 +23,14 @@ body:             |
     ; AVX-LABEL: name: test_merge
     ; AVX: [[DEF:%[0-9]+]]:vr128 = IMPLICIT_DEF
     ; AVX: undef %2.sub_xmm:vr256 = COPY [[DEF]]
-    ; AVX: [[VINSERTF128rr:%[0-9]+]]:vr256 = VINSERTF128rr %2, [[DEF]], 1
-    ; AVX: $ymm0 = COPY [[VINSERTF128rr]]
+    ; AVX: [[VINSERTF128rri:%[0-9]+]]:vr256 = VINSERTF128rri %2, [[DEF]], 1
+    ; AVX: $ymm0 = COPY [[VINSERTF128rri]]
     ; AVX: RET 0, implicit $ymm0
     ; AVX512VL-LABEL: name: test_merge
     ; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
     ; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]]
-    ; AVX512VL: [[VINSERTF32x4Z256rr:%[0-9]+]]:vr256x = VINSERTF32x4Z256rr %2, [[DEF]], 1
-    ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rr]]
+    ; AVX512VL: [[VINSERTF32x4Z256rri:%[0-9]+]]:vr256x = VINSERTF32x4Z256rri %2, [[DEF]], 1
+    ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rri]]
     ; AVX512VL: RET 0, implicit $ymm0
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>)

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
index 3c003d6cf92668..22045d3bb8cbb4 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
@@ -24,10 +24,10 @@ body:             |
     ; ALL-LABEL: name: test_merge_v128
     ; ALL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
     ; ALL: undef %2.sub_xmm:vr512 = COPY [[DEF]]
-    ; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr %2, [[DEF]], 1
-    ; ALL: [[VINSERTF32x4Zrr1:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr]], [[DEF]], 2
-    ; ALL: [[VINSERTF32x4Zrr2:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr1]], [[DEF]], 3
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr2]]
+    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri %2, [[DEF]], 1
+    ; ALL: [[VINSERTF32x4Zrri1:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri]], [[DEF]], 2
+    ; ALL: [[VINSERTF32x4Zrri2:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri1]], [[DEF]], 3
+    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri2]]
     ; ALL: RET 0, implicit $zmm0
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
@@ -49,8 +49,8 @@ body:             |
     ; ALL-LABEL: name: test_merge_v256
     ; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
     ; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]]
-    ; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr %2, [[DEF]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri %2, [[DEF]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
     ; ALL: RET 0, implicit $zmm0
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_CONCAT_VECTORS %0(<8 x s32>), %0(<8 x s32>)

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
index 39471928d44b51..5ed1463f873a94 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
@@ -24,18 +24,19 @@ body:             |
 
     ; AVX-LABEL: name: test_unmerge
     ; AVX: [[DEF:%[0-9]+]]:vr256 = IMPLICIT_DEF
-    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY [[DEF]].sub_xmm
-    ; AVX: [[VEXTRACTF128rr:%[0-9]+]]:vr128 = VEXTRACTF128rr [[DEF]], 1
-    ; AVX: $xmm0 = COPY [[COPY]]
-    ; AVX: $xmm1 = COPY [[VEXTRACTF128rr]]
-    ; AVX: RET 0, implicit $xmm0, implicit $xmm1
+    ; AVX-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY [[DEF]].sub_xmm
+    ; AVX-NEXT: [[VEXTRACTF128rri:%[0-9]+]]:vr128 = VEXTRACTF128rri [[DEF]], 1
+    ; AVX-NEXT: $xmm0 = COPY [[COPY]]
+    ; AVX-NEXT: $xmm1 = COPY [[VEXTRACTF128rri]]
+    ; AVX-NEXT: RET 0, implicit $xmm0, implicit $xmm1
+    ;
     ; AVX512VL-LABEL: name: test_unmerge
     ; AVX512VL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
-    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
-    ; AVX512VL: [[VEXTRACTF32x4Z256rr:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rr [[DEF]], 1
-    ; AVX512VL: $xmm0 = COPY [[COPY]]
-    ; AVX512VL: $xmm1 = COPY [[VEXTRACTF32x4Z256rr]]
-    ; AVX512VL: RET 0, implicit $xmm0, implicit $xmm1
+    ; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
+    ; AVX512VL-NEXT: [[VEXTRACTF32x4Z256rri:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rri [[DEF]], 1
+    ; AVX512VL-NEXT: $xmm0 = COPY [[COPY]]
+    ; AVX512VL-NEXT: $xmm1 = COPY [[VEXTRACTF32x4Z256rri]]
+    ; AVX512VL-NEXT: RET 0, implicit $xmm0, implicit $xmm1
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)
     $xmm0 = COPY %1(<4 x s32>)

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
index 17730f985f97e9..8864d5bb47488e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
@@ -27,9 +27,9 @@ body:             |
     ; ALL-LABEL: name: test_unmerge_v128
     ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
     ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
-    ; ALL: [[VEXTRACTF32x4Zrr:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 1
-    ; ALL: [[VEXTRACTF32x4Zrr1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 2
-    ; ALL: [[VEXTRACTF32x4Zrr2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 3
+    ; ALL: [[VEXTRACTF32x4Zrri:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 1
+    ; ALL: [[VEXTRACTF32x4Zrri1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 2
+    ; ALL: [[VEXTRACTF32x4Zrri2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrri [[DEF]], 3
     ; ALL: $xmm0 = COPY [[COPY]]
     ; ALL: RET 0, implicit $xmm0
     %0(<16 x s32>) = IMPLICIT_DEF
@@ -53,7 +53,7 @@ body:             |
     ; ALL-LABEL: name: test_unmerge_v256
     ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
     ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY [[DEF]].sub_ymm
-    ; ALL: [[VEXTRACTF64x4Zrr:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrr [[DEF]], 1
+    ; ALL: [[VEXTRACTF64x4Zrri:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrri [[DEF]], 1
     ; ALL: $ymm0 = COPY [[COPY]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF

diff  --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
index 9cf087000cb60e..1d2cda259d8e55 100644
--- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -877,21 +877,21 @@ body: |
   $ymm0 = VRNDSCALEPSZ256rmi                   $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   ; CHECK: $ymm0 = VROUNDPSYri                 $ymm0, 15, implicit $mxcsr
   $ymm0 = VRNDSCALEPSZ256rri                   $ymm0, 15, implicit $mxcsr
-  ; CHECK: $ymm0 = VPERM2F128rm                $ymm0, $rip, 1, $noreg, 0, $noreg, 32
+  ; CHECK: $ymm0 = VPERM2F128rmi               $ymm0, $rip, 1, $noreg, 0, $noreg, 32
   $ymm0 = VSHUFF32X4Z256rmi                    $ymm0, $rip, 1, $noreg, 0, $noreg, 228
-  ; CHECK: $ymm0 = VPERM2F128rr                $ymm0, $ymm1, 32
+  ; CHECK: $ymm0 = VPERM2F128rri               $ymm0, $ymm1, 32
   $ymm0 = VSHUFF32X4Z256rri                    $ymm0, $ymm1, 228
-  ; CHECK: $ymm0 = VPERM2F128rm                $ymm0, $rip, 1, $noreg, 0, $noreg, 32
+  ; CHECK: $ymm0 = VPERM2F128rmi               $ymm0, $rip, 1, $noreg, 0, $noreg, 32
   $ymm0 = VSHUFF64X2Z256rmi                    $ymm0, $rip, 1, $noreg, 0, $noreg, 228
-  ; CHECK: $ymm0 = VPERM2F128rr                $ymm0, $ymm1, 32
+  ; CHECK: $ymm0 = VPERM2F128rri               $ymm0, $ymm1, 32
   $ymm0 = VSHUFF64X2Z256rri                    $ymm0, $ymm1, 228
-  ; CHECK: $ymm0 = VPERM2I128rm                $ymm0, $rip, 1, $noreg, 0, $noreg, 32
+  ; CHECK: $ymm0 = VPERM2I128rmi               $ymm0, $rip, 1, $noreg, 0, $noreg, 32
   $ymm0 = VSHUFI32X4Z256rmi                    $ymm0, $rip, 1, $noreg, 0, $noreg, 228
-  ; CHECK: $ymm0 = VPERM2I128rr                $ymm0, $ymm1, 32
+  ; CHECK: $ymm0 = VPERM2I128rri               $ymm0, $ymm1, 32
   $ymm0 = VSHUFI32X4Z256rri                    $ymm0, $ymm1, 228
-  ; CHECK: $ymm0 = VPERM2I128rm                $ymm0, $rip, 1, $noreg, 0, $noreg, 32
+  ; CHECK: $ymm0 = VPERM2I128rmi               $ymm0, $rip, 1, $noreg, 0, $noreg, 32
   $ymm0 = VSHUFI64X2Z256rmi                    $ymm0, $rip, 1, $noreg, 0, $noreg, 228
-  ; CHECK: $ymm0 = VPERM2I128rr                $ymm0, $ymm1, 32
+  ; CHECK: $ymm0 = VPERM2I128rri               $ymm0, $ymm1, 32
   $ymm0 = VSHUFI64X2Z256rri                    $ymm0, $ymm1, 228
 
   RET64

diff  --git a/llvm/test/CodeGen/X86/opt_phis2.mir b/llvm/test/CodeGen/X86/opt_phis2.mir
index 23c75b37c27aae..d528bc7d4e569e 100644
--- a/llvm/test/CodeGen/X86/opt_phis2.mir
+++ b/llvm/test/CodeGen/X86/opt_phis2.mir
@@ -49,17 +49,17 @@ body:             |
 
   bb.4:
     %3:vr256 = COPY %8
-    %17:vr128 = VEXTRACTF128rr %8, 1
+    %17:vr128 = VEXTRACTF128rri %8, 1
     VPEXTRDmr %9, 1, $noreg, 12, $noreg, killed %17, 2
 
   bb.5:
     %4:vr256 = PHI %0, %bb.1, %3, %bb.4
-    %18:vr128 = VEXTRACTF128rr %4, 1
+    %18:vr128 = VEXTRACTF128rri %4, 1
     VPEXTRDmr %9, 1, $noreg, 8, $noreg, killed %18, 1
 
   bb.6:
     %5:vr256 = PHI %1, %bb.2, %4, %bb.5
-    %19:vr128 = VEXTRACTF128rr %5, 1
+    %19:vr128 = VEXTRACTF128rri %5, 1
     VMOVPDI2DImr %9, 1, $noreg, 4, $noreg, killed %19
 
   bb.7:

diff  --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index be1b59eb50c91c..c731de84759a95 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -486,20 +486,20 @@ static const X86FoldTableEntry Table0[] = {
   {X86::VCVTPS2PHYrr, X86::VCVTPS2PHYmr, TB_FOLDED_STORE},
   {X86::VCVTPS2PHZ256rr, X86::VCVTPS2PHZ256mr, TB_FOLDED_STORE},
   {X86::VCVTPS2PHZrr, X86::VCVTPS2PHZmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE},
-  {X86::VEXTRACTF32x4Z256rr, X86::VEXTRACTF32x4Z256mr, TB_FOLDED_STORE},
-  {X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTF64x2Z256rr, X86::VEXTRACTF64x2Z256mr, TB_FOLDED_STORE},
-  {X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE},
-  {X86::VEXTRACTI32x4Z256rr, X86::VEXTRACTI32x4Z256mr, TB_FOLDED_STORE},
-  {X86::VEXTRACTI32x4Zrr, X86::VEXTRACTI32x4Zmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTI32x8Zrr, X86::VEXTRACTI32x8Zmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTI64x2Z256rr, X86::VEXTRACTI64x2Z256mr, TB_FOLDED_STORE},
-  {X86::VEXTRACTI64x2Zrr, X86::VEXTRACTI64x2Zmr, TB_FOLDED_STORE},
-  {X86::VEXTRACTI64x4Zrr, X86::VEXTRACTI64x4Zmr, TB_FOLDED_STORE},
+  {X86::VEXTRACTF128rri, X86::VEXTRACTF128mri, TB_FOLDED_STORE},
+  {X86::VEXTRACTF32x4Z256rri, X86::VEXTRACTF32x4Z256mri, TB_FOLDED_STORE},
+  {X86::VEXTRACTF32x4Zrri, X86::VEXTRACTF32x4Zmri, TB_FOLDED_STORE},
+  {X86::VEXTRACTF32x8Zrri, X86::VEXTRACTF32x8Zmri, TB_FOLDED_STORE},
+  {X86::VEXTRACTF64x2Z256rri, X86::VEXTRACTF64x2Z256mri, TB_FOLDED_STORE},
+  {X86::VEXTRACTF64x2Zrri, X86::VEXTRACTF64x2Zmri, TB_FOLDED_STORE},
+  {X86::VEXTRACTF64x4Zrri, X86::VEXTRACTF64x4Zmri, TB_FOLDED_STORE},
+  {X86::VEXTRACTI128rri, X86::VEXTRACTI128mri, TB_FOLDED_STORE},
+  {X86::VEXTRACTI32x4Z256rri, X86::VEXTRACTI32x4Z256mri, TB_FOLDED_STORE},
+  {X86::VEXTRACTI32x4Zrri, X86::VEXTRACTI32x4Zmri, TB_FOLDED_STORE},
+  {X86::VEXTRACTI32x8Zrri, X86::VEXTRACTI32x8Zmri, TB_FOLDED_STORE},
+  {X86::VEXTRACTI64x2Z256rri, X86::VEXTRACTI64x2Z256mri, TB_FOLDED_STORE},
+  {X86::VEXTRACTI64x2Zrri, X86::VEXTRACTI64x2Zmri, TB_FOLDED_STORE},
+  {X86::VEXTRACTI64x4Zrri, X86::VEXTRACTI64x4Zmri, TB_FOLDED_STORE},
   {X86::VEXTRACTPSZrr, X86::VEXTRACTPSZmr, TB_FOLDED_STORE},
   {X86::VEXTRACTPSrr, X86::VEXTRACTPSmr, TB_FOLDED_STORE},
   {X86::VMOV64toSDZrr, X86::MOV64mr, TB_FOLDED_STORE|TB_NO_REVERSE},
@@ -2986,20 +2986,20 @@ static const X86FoldTableEntry Table2[] = {
   {X86::VHSUBPDrr, X86::VHSUBPDrm, 0},
   {X86::VHSUBPSYrr, X86::VHSUBPSYrm, 0},
   {X86::VHSUBPSrr, X86::VHSUBPSrm, 0},
-  {X86::VINSERTF128rr, X86::VINSERTF128rm, 0},
-  {X86::VINSERTF32x4Z256rr, X86::VINSERTF32x4Z256rm, 0},
-  {X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrm, 0},
-  {X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrm, 0},
-  {X86::VINSERTF64x2Z256rr, X86::VINSERTF64x2Z256rm, 0},
-  {X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrm, 0},
-  {X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrm, 0},
-  {X86::VINSERTI128rr, X86::VINSERTI128rm, 0},
-  {X86::VINSERTI32x4Z256rr, X86::VINSERTI32x4Z256rm, 0},
-  {X86::VINSERTI32x4Zrr, X86::VINSERTI32x4Zrm, 0},
-  {X86::VINSERTI32x8Zrr, X86::VINSERTI32x8Zrm, 0},
-  {X86::VINSERTI64x2Z256rr, X86::VINSERTI64x2Z256rm, 0},
-  {X86::VINSERTI64x2Zrr, X86::VINSERTI64x2Zrm, 0},
-  {X86::VINSERTI64x4Zrr, X86::VINSERTI64x4Zrm, 0},
+  {X86::VINSERTF128rri, X86::VINSERTF128rmi, 0},
+  {X86::VINSERTF32x4Z256rri, X86::VINSERTF32x4Z256rmi, 0},
+  {X86::VINSERTF32x4Zrri, X86::VINSERTF32x4Zrmi, 0},
+  {X86::VINSERTF32x8Zrri, X86::VINSERTF32x8Zrmi, 0},
+  {X86::VINSERTF64x2Z256rri, X86::VINSERTF64x2Z256rmi, 0},
+  {X86::VINSERTF64x2Zrri, X86::VINSERTF64x2Zrmi, 0},
+  {X86::VINSERTF64x4Zrri, X86::VINSERTF64x4Zrmi, 0},
+  {X86::VINSERTI128rri, X86::VINSERTI128rmi, 0},
+  {X86::VINSERTI32x4Z256rri, X86::VINSERTI32x4Z256rmi, 0},
+  {X86::VINSERTI32x4Zrri, X86::VINSERTI32x4Zrmi, 0},
+  {X86::VINSERTI32x8Zrri, X86::VINSERTI32x8Zrmi, 0},
+  {X86::VINSERTI64x2Z256rri, X86::VINSERTI64x2Z256rmi, 0},
+  {X86::VINSERTI64x2Zrri, X86::VINSERTI64x2Zrmi, 0},
+  {X86::VINSERTI64x4Zrri, X86::VINSERTI64x4Zrmi, 0},
   {X86::VMAXCPDYrr, X86::VMAXCPDYrm, 0},
   {X86::VMAXCPDZ128rr, X86::VMAXCPDZ128rm, 0},
   {X86::VMAXCPDZ256rr, X86::VMAXCPDZ256rm, 0},
@@ -3411,8 +3411,8 @@ static const X86FoldTableEntry Table2[] = {
   {X86::VPCONFLICTQZ128rrkz, X86::VPCONFLICTQZ128rmkz, 0},
   {X86::VPCONFLICTQZ256rrkz, X86::VPCONFLICTQZ256rmkz, 0},
   {X86::VPCONFLICTQZrrkz, X86::VPCONFLICTQZrmkz, 0},
-  {X86::VPERM2F128rr, X86::VPERM2F128rm, 0},
-  {X86::VPERM2I128rr, X86::VPERM2I128rm, 0},
+  {X86::VPERM2F128rri, X86::VPERM2F128rmi, 0},
+  {X86::VPERM2I128rri, X86::VPERM2I128rmi, 0},
   {X86::VPERMBZ128rr, X86::VPERMBZ128rm, 0},
   {X86::VPERMBZ256rr, X86::VPERMBZ256rm, 0},
   {X86::VPERMBZrr, X86::VPERMBZrm, 0},
@@ -5057,18 +5057,18 @@ static const X86FoldTableEntry Table3[] = {
   {X86::VGF2P8MULBZ128rrkz, X86::VGF2P8MULBZ128rmkz, 0},
   {X86::VGF2P8MULBZ256rrkz, X86::VGF2P8MULBZ256rmkz, 0},
   {X86::VGF2P8MULBZrrkz, X86::VGF2P8MULBZrmkz, 0},
-  {X86::VINSERTF32x4Z256rrkz, X86::VINSERTF32x4Z256rmkz, 0},
-  {X86::VINSERTF32x4Zrrkz, X86::VINSERTF32x4Zrmkz, 0},
-  {X86::VINSERTF32x8Zrrkz, X86::VINSERTF32x8Zrmkz, 0},
-  {X86::VINSERTF64x2Z256rrkz, X86::VINSERTF64x2Z256rmkz, 0},
-  {X86::VINSERTF64x2Zrrkz, X86::VINSERTF64x2Zrmkz, 0},
-  {X86::VINSERTF64x4Zrrkz, X86::VINSERTF64x4Zrmkz, 0},
-  {X86::VINSERTI32x4Z256rrkz, X86::VINSERTI32x4Z256rmkz, 0},
-  {X86::VINSERTI32x4Zrrkz, X86::VINSERTI32x4Zrmkz, 0},
-  {X86::VINSERTI32x8Zrrkz, X86::VINSERTI32x8Zrmkz, 0},
-  {X86::VINSERTI64x2Z256rrkz, X86::VINSERTI64x2Z256rmkz, 0},
-  {X86::VINSERTI64x2Zrrkz, X86::VINSERTI64x2Zrmkz, 0},
-  {X86::VINSERTI64x4Zrrkz, X86::VINSERTI64x4Zrmkz, 0},
+  {X86::VINSERTF32x4Z256rrikz, X86::VINSERTF32x4Z256rmikz, 0},
+  {X86::VINSERTF32x4Zrrikz, X86::VINSERTF32x4Zrmikz, 0},
+  {X86::VINSERTF32x8Zrrikz, X86::VINSERTF32x8Zrmikz, 0},
+  {X86::VINSERTF64x2Z256rrikz, X86::VINSERTF64x2Z256rmikz, 0},
+  {X86::VINSERTF64x2Zrrikz, X86::VINSERTF64x2Zrmikz, 0},
+  {X86::VINSERTF64x4Zrrikz, X86::VINSERTF64x4Zrmikz, 0},
+  {X86::VINSERTI32x4Z256rrikz, X86::VINSERTI32x4Z256rmikz, 0},
+  {X86::VINSERTI32x4Zrrikz, X86::VINSERTI32x4Zrmikz, 0},
+  {X86::VINSERTI32x8Zrrikz, X86::VINSERTI32x8Zrmikz, 0},
+  {X86::VINSERTI64x2Z256rrikz, X86::VINSERTI64x2Z256rmikz, 0},
+  {X86::VINSERTI64x2Zrrikz, X86::VINSERTI64x2Zrmikz, 0},
+  {X86::VINSERTI64x4Zrrikz, X86::VINSERTI64x4Zrmikz, 0},
   {X86::VMAXCPDZ128rrkz, X86::VMAXCPDZ128rmkz, 0},
   {X86::VMAXCPDZ256rrkz, X86::VMAXCPDZ256rmkz, 0},
   {X86::VMAXCPDZrrkz, X86::VMAXCPDZrmkz, 0},
@@ -6679,18 +6679,18 @@ static const X86FoldTableEntry Table4[] = {
   {X86::VGF2P8MULBZ128rrk, X86::VGF2P8MULBZ128rmk, 0},
   {X86::VGF2P8MULBZ256rrk, X86::VGF2P8MULBZ256rmk, 0},
   {X86::VGF2P8MULBZrrk, X86::VGF2P8MULBZrmk, 0},
-  {X86::VINSERTF32x4Z256rrk, X86::VINSERTF32x4Z256rmk, 0},
-  {X86::VINSERTF32x4Zrrk, X86::VINSERTF32x4Zrmk, 0},
-  {X86::VINSERTF32x8Zrrk, X86::VINSERTF32x8Zrmk, 0},
-  {X86::VINSERTF64x2Z256rrk, X86::VINSERTF64x2Z256rmk, 0},
-  {X86::VINSERTF64x2Zrrk, X86::VINSERTF64x2Zrmk, 0},
-  {X86::VINSERTF64x4Zrrk, X86::VINSERTF64x4Zrmk, 0},
-  {X86::VINSERTI32x4Z256rrk, X86::VINSERTI32x4Z256rmk, 0},
-  {X86::VINSERTI32x4Zrrk, X86::VINSERTI32x4Zrmk, 0},
-  {X86::VINSERTI32x8Zrrk, X86::VINSERTI32x8Zrmk, 0},
-  {X86::VINSERTI64x2Z256rrk, X86::VINSERTI64x2Z256rmk, 0},
-  {X86::VINSERTI64x2Zrrk, X86::VINSERTI64x2Zrmk, 0},
-  {X86::VINSERTI64x4Zrrk, X86::VINSERTI64x4Zrmk, 0},
+  {X86::VINSERTF32x4Z256rrik, X86::VINSERTF32x4Z256rmik, 0},
+  {X86::VINSERTF32x4Zrrik, X86::VINSERTF32x4Zrmik, 0},
+  {X86::VINSERTF32x8Zrrik, X86::VINSERTF32x8Zrmik, 0},
+  {X86::VINSERTF64x2Z256rrik, X86::VINSERTF64x2Z256rmik, 0},
+  {X86::VINSERTF64x2Zrrik, X86::VINSERTF64x2Zrmik, 0},
+  {X86::VINSERTF64x4Zrrik, X86::VINSERTF64x4Zrmik, 0},
+  {X86::VINSERTI32x4Z256rrik, X86::VINSERTI32x4Z256rmik, 0},
+  {X86::VINSERTI32x4Zrrik, X86::VINSERTI32x4Zrmik, 0},
+  {X86::VINSERTI32x8Zrrik, X86::VINSERTI32x8Zrmik, 0},
+  {X86::VINSERTI64x2Z256rrik, X86::VINSERTI64x2Z256rmik, 0},
+  {X86::VINSERTI64x2Zrrik, X86::VINSERTI64x2Zrmik, 0},
+  {X86::VINSERTI64x4Zrrik, X86::VINSERTI64x4Zrmik, 0},
   {X86::VMAXCPDZ128rrk, X86::VMAXCPDZ128rmk, 0},
   {X86::VMAXCPDZ256rrk, X86::VMAXCPDZ256rmk, 0},
   {X86::VMAXCPDZrrk, X86::VMAXCPDZrmk, 0},

diff  --git a/llvm/utils/TableGen/X86ManualFoldTables.def b/llvm/utils/TableGen/X86ManualFoldTables.def
index c51bc9748d79b6..4a58deaa0ff1b4 100644
--- a/llvm/utils/TableGen/X86ManualFoldTables.def
+++ b/llvm/utils/TableGen/X86ManualFoldTables.def
@@ -31,18 +31,18 @@ NOFOLD(VCOMPRESSPSZrrk)
 NOFOLD(VCVTPS2PHZ128rrk)
 NOFOLD(VCVTPS2PHZ256rrk)
 NOFOLD(VCVTPS2PHZrrk)
-NOFOLD(VEXTRACTF32x4Z256rrk)
-NOFOLD(VEXTRACTF32x4Zrrk)
-NOFOLD(VEXTRACTF32x8Zrrk)
-NOFOLD(VEXTRACTF64x2Z256rrk)
-NOFOLD(VEXTRACTF64x2Zrrk)
-NOFOLD(VEXTRACTF64x4Zrrk)
-NOFOLD(VEXTRACTI32x4Z256rrk)
-NOFOLD(VEXTRACTI32x4Zrrk)
-NOFOLD(VEXTRACTI32x8Zrrk)
-NOFOLD(VEXTRACTI64x2Z256rrk)
-NOFOLD(VEXTRACTI64x2Zrrk)
-NOFOLD(VEXTRACTI64x4Zrrk)
+NOFOLD(VEXTRACTF32x4Z256rrik)
+NOFOLD(VEXTRACTF32x4Zrrik)
+NOFOLD(VEXTRACTF32x8Zrrik)
+NOFOLD(VEXTRACTF64x2Z256rrik)
+NOFOLD(VEXTRACTF64x2Zrrik)
+NOFOLD(VEXTRACTF64x4Zrrik)
+NOFOLD(VEXTRACTI32x4Z256rrik)
+NOFOLD(VEXTRACTI32x4Zrrik)
+NOFOLD(VEXTRACTI32x8Zrrik)
+NOFOLD(VEXTRACTI64x2Z256rrik)
+NOFOLD(VEXTRACTI64x2Zrrik)
+NOFOLD(VEXTRACTI64x4Zrrik)
 NOFOLD(VMOVAPDZ128mrk)
 NOFOLD(VMOVAPDZ256mrk)
 NOFOLD(VMOVAPDZmrk)

diff  --git a/llvm/utils/TableGen/X86ManualInstrMapping.def b/llvm/utils/TableGen/X86ManualInstrMapping.def
index f0154b80a80dbe..d76c404722b0ac 100644
--- a/llvm/utils/TableGen/X86ManualInstrMapping.def
+++ b/llvm/utils/TableGen/X86ManualInstrMapping.def
@@ -81,14 +81,14 @@ ENTRY(VMPSADBWZ128rmi, VMPSADBWrmi)
 ENTRY(VMPSADBWZ128rri, VMPSADBWrri)
 ENTRY(VMPSADBWZ256rmi, VMPSADBWYrmi)
 ENTRY(VMPSADBWZ256rri, VMPSADBWYrri)
-ENTRY(VSHUFF32X4Z256rmi, VPERM2F128rm)
-ENTRY(VSHUFF32X4Z256rri, VPERM2F128rr)
-ENTRY(VSHUFF64X2Z256rmi, VPERM2F128rm)
-ENTRY(VSHUFF64X2Z256rri, VPERM2F128rr)
-ENTRY(VSHUFI32X4Z256rmi, VPERM2I128rm)
-ENTRY(VSHUFI32X4Z256rri, VPERM2I128rr)
-ENTRY(VSHUFI64X2Z256rmi, VPERM2I128rm)
-ENTRY(VSHUFI64X2Z256rri, VPERM2I128rr)
+ENTRY(VSHUFF32X4Z256rmi, VPERM2F128rmi)
+ENTRY(VSHUFF32X4Z256rri, VPERM2F128rri)
+ENTRY(VSHUFF64X2Z256rmi, VPERM2F128rmi)
+ENTRY(VSHUFF64X2Z256rri, VPERM2F128rri)
+ENTRY(VSHUFI32X4Z256rmi, VPERM2I128rmi)
+ENTRY(VSHUFI32X4Z256rri, VPERM2I128rri)
+ENTRY(VSHUFI64X2Z256rmi, VPERM2I128rmi)
+ENTRY(VSHUFI64X2Z256rri, VPERM2I128rri)
 // W bit does not match
 ENTRY(VADDPDZ128rm, VADDPDrm)
 ENTRY(VADDPDZ128rr, VADDPDrr)
@@ -245,14 +245,14 @@ ENTRY(VCVTTPD2DQZ256rm, VCVTTPD2DQYrm)
 ENTRY(VCVTTPD2DQZ256rr, VCVTTPD2DQYrr)
 ENTRY(VDIVPDZ256rm, VDIVPDYrm)
 ENTRY(VDIVPDZ256rr, VDIVPDYrr)
-ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF128mr)
-ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF128rr)
-ENTRY(VEXTRACTI64x2Z256mr, VEXTRACTI128mr)
-ENTRY(VEXTRACTI64x2Z256rr, VEXTRACTI128rr)
-ENTRY(VINSERTF64x2Z256rm, VINSERTF128rm)
-ENTRY(VINSERTF64x2Z256rr, VINSERTF128rr)
-ENTRY(VINSERTI64x2Z256rm, VINSERTI128rm)
-ENTRY(VINSERTI64x2Z256rr, VINSERTI128rr)
+ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF128mri)
+ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF128rri)
+ENTRY(VEXTRACTI64x2Z256mr, VEXTRACTI128mri)
+ENTRY(VEXTRACTI64x2Z256rr, VEXTRACTI128rri)
+ENTRY(VINSERTF64x2Z256rm, VINSERTF128rmi)
+ENTRY(VINSERTF64x2Z256rr, VINSERTF128rri)
+ENTRY(VINSERTI64x2Z256rm, VINSERTI128rmi)
+ENTRY(VINSERTI64x2Z256rr, VINSERTI128rri)
 ENTRY(VMAXCPDZ256rm, VMAXCPDYrm)
 ENTRY(VMAXCPDZ256rr, VMAXCPDYrr)
 ENTRY(VMAXPDZ256rm, VMAXPDYrm)
