[llvm] 1e33bd2 - [X86] Add missing immediate qualifier to the (V)PINSR/PEXTR instruction names

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 15 06:30:15 PDT 2024


Author: Simon Pilgrim
Date: 2024-09-15T14:12:55+01:00
New Revision: 1e33bd2031a6d0f61d4b0ba82bf416e7ace3b9ce

URL: https://github.com/llvm/llvm-project/commit/1e33bd2031a6d0f61d4b0ba82bf416e7ace3b9ce
DIFF: https://github.com/llvm/llvm-project/commit/1e33bd2031a6d0f61d4b0ba82bf416e7ace3b9ce.diff

LOG: [X86] Add missing immediate qualifier to the (V)PINSR/PEXTR instruction names

Makes it easier to algorithmically recreate the instruction name in various analysis scripts I'm working on.

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86InstrAVX512.td
    llvm/lib/Target/X86/X86InstrMMX.td
    llvm/lib/Target/X86/X86InstrSSE.td
    llvm/lib/Target/X86/X86ReplaceableInstrs.def
    llvm/lib/Target/X86/X86SchedAlderlakeP.td
    llvm/lib/Target/X86/X86SchedSapphireRapids.td
    llvm/test/CodeGen/X86/evex-to-vex-compress.mir
    llvm/test/CodeGen/X86/opt_phis2.mir
    llvm/test/TableGen/x86-fold-tables.inc

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index f97145db0daaf8..b6bf34a8a0d31c 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -11432,7 +11432,7 @@ defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
 
 multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                                             X86VectorVTInfo _> {
-  def mr : AVX512Ii8<opc, MRMDestMem, (outs),
+  def mri : AVX512Ii8<opc, MRMDestMem, (outs),
               (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
               OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(store (_.EltVT (trunc (OpNode (_.VT _.RC:$src1), timm:$src2))),
@@ -11442,7 +11442,7 @@ multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
 
 multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
   let Predicates = [HasBWI] in {
-    def rr : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
+    def rri : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
                   (ins _.RC:$src1, u8imm:$src2),
                   OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                   [(set GR32orGR64:$dst,
@@ -11455,7 +11455,7 @@ multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
 
 multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
   let Predicates = [HasBWI] in {
-    def rr : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
+    def rri : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
                   (ins _.RC:$src1, u8imm:$src2),
                   OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                   [(set GR32orGR64:$dst,
@@ -11463,7 +11463,7 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
                   EVEX, TB, PD, Sched<[WriteVecExtract]>;
 
     let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in
-    def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
+    def rri_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
                    (ins _.RC:$src1, u8imm:$src2),
                    OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    EVEX, TA, PD, Sched<[WriteVecExtract]>;
@@ -11475,14 +11475,14 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
 multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
                                                             RegisterClass GRC> {
   let Predicates = [HasDQI] in {
-    def rr : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
+    def rri : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
                   (ins _.RC:$src1, u8imm:$src2),
                   OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                   [(set GRC:$dst,
                       (extractelt (_.VT _.RC:$src1), imm:$src2))]>,
                   EVEX, TA, PD, Sched<[WriteVecExtract]>;
 
-    def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
+    def mri : AVX512Ii8<0x16, MRMDestMem, (outs),
                 (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
                 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                 [(store (extractelt (_.VT _.RC:$src1),
@@ -11500,7 +11500,7 @@ defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, REX_W;
 multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                             X86VectorVTInfo _, PatFrag LdFrag,
                                             SDPatternOperator immoperator> {
-  def rm : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
+  def rmi : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
       (ins _.RC:$src1,  _.ScalarMemOp:$src2, u8imm:$src3),
       OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
       [(set _.RC:$dst,
@@ -11511,7 +11511,7 @@ multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
 multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                             X86VectorVTInfo _, PatFrag LdFrag> {
   let Predicates = [HasBWI] in {
-    def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+    def rri : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
         (ins _.RC:$src1, GR32orGR64:$src2, u8imm:$src3),
         OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
         [(set _.RC:$dst,
@@ -11525,7 +11525,7 @@ multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
 multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
                                          X86VectorVTInfo _, RegisterClass GRC> {
   let Predicates = [HasDQI] in {
-    def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+    def rri : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
         (ins _.RC:$src1, GRC:$src2, u8imm:$src3),
         OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
         [(set _.RC:$dst,
@@ -11548,27 +11548,27 @@ let Predicates = [HasAVX512, NoBWI] in {
   def : Pat<(X86pinsrb VR128:$src1,
                        (i32 (anyext (i8 (bitconvert v8i1:$src2)))),
                        timm:$src3),
-            (VPINSRBrr VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
-                       timm:$src3)>;
+            (VPINSRBrri VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
+                        timm:$src3)>;
 }
 
 let Predicates = [HasBWI] in {
   def : Pat<(X86pinsrb VR128:$src1, (i32 (anyext (i8 GR8:$src2))), timm:$src3),
-            (VPINSRBZrr VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
-                        GR8:$src2, sub_8bit), timm:$src3)>;
+            (VPINSRBZrri VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
+                         GR8:$src2, sub_8bit), timm:$src3)>;
   def : Pat<(X86pinsrb VR128:$src1,
                        (i32 (anyext (i8 (bitconvert v8i1:$src2)))),
                        timm:$src3),
-            (VPINSRBZrr VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
-                        timm:$src3)>;
+            (VPINSRBZrri VR128:$src1, (i32 (COPY_TO_REGCLASS VK8:$src2, GR32)),
+                         timm:$src3)>;
 }
 
 // Always select FP16 instructions if available.
 let Predicates = [HasBWI], AddedComplexity = -10 in {
-  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWZrm (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16X)>;
-  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWZmr addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
-  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWZrr (v8i16 (COPY_TO_REGCLASS FR16X:$src, VR128X)), 0), sub_16bit)>;
-  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWZrr (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16X)>;
+  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWZrmi (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16X)>;
+  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWZmri addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
+  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWZrri (v8i16 (COPY_TO_REGCLASS FR16X:$src, VR128X)), 0), sub_16bit)>;
+  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWZrri (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16X)>;
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td
index 60dfe66960507d..644d6d0b92dfc7 100644
--- a/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/llvm/lib/Target/X86/X86InstrMMX.td
@@ -509,7 +509,7 @@ let Constraints = "$src1 = $dst" in {
 
 // Extract / Insert
 let Predicates = [HasMMX, HasSSE1] in
-def MMX_PEXTRWrr: MMXIi8<0xC5, MRMSrcReg,
+def MMX_PEXTRWrri : MMXIi8<0xC5, MRMSrcReg,
                      (outs GR32orGR64:$dst), (ins VR64:$src1, i32u8imm:$src2),
                      "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set GR32orGR64:$dst, (int_x86_mmx_pextr_w VR64:$src1,
@@ -517,7 +517,7 @@ def MMX_PEXTRWrr: MMXIi8<0xC5, MRMSrcReg,
                      Sched<[WriteVecExtract]>;
 let Constraints = "$src1 = $dst" in {
 let Predicates = [HasMMX, HasSSE1] in {
-  def MMX_PINSRWrr : MMXIi8<0xC4, MRMSrcReg,
+  def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg,
                     (outs VR64:$dst),
                     (ins VR64:$src1, GR32orGR64:$src2, i32u8imm:$src3),
                     "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
@@ -525,7 +525,7 @@ let Predicates = [HasMMX, HasSSE1] in {
                                       GR32orGR64:$src2, timm:$src3))]>,
                     Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
 
-  def MMX_PINSRWrm : MMXIi8<0xC4, MRMSrcMem,
+  def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem,
                    (outs VR64:$dst),
                    (ins VR64:$src1, i16mem:$src2, i32u8imm:$src3),
                    "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",

diff  --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index d4b49051012c68..d51125a209db9d 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -3989,7 +3989,7 @@ let Constraints = "$src1 = $dst" in {
 
 let ExeDomain = SSEPackedInt in {
 multiclass sse2_pinsrw<bit Is2Addr = 1> {
-  def rr : Ii8<0xC4, MRMSrcReg,
+  def rri : Ii8<0xC4, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1,
         GR32orGR64:$src2, u8imm:$src3),
        !if(Is2Addr,
@@ -3998,7 +3998,7 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
        [(set VR128:$dst,
          (X86pinsrw VR128:$src1, GR32orGR64:$src2, timm:$src3))]>,
        Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : Ii8<0xC4, MRMSrcMem,
+  def rmi : Ii8<0xC4, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1,
                        i16mem:$src2, u8imm:$src3),
        !if(Is2Addr,
@@ -4012,13 +4012,13 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
 
 // Extract
 let Predicates = [HasAVX, NoBWI] in
-def VPEXTRWrr : Ii8<0xC5, MRMSrcReg,
+def VPEXTRWrri : Ii8<0xC5, MRMSrcReg,
                     (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
                     "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
                                             timm:$src2))]>,
                 TB, PD, VEX, WIG, Sched<[WriteVecExtract]>;
-def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
+def PEXTRWrri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
@@ -4036,16 +4036,16 @@ defm PINSRW : sse2_pinsrw, TB, PD;
 
 // Always select FP16 instructions if available.
 let Predicates = [UseSSE2], AddedComplexity = -10 in {
-  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (PINSRWrm (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
-  def : Pat<(store f16:$src, addr:$dst), (MOV16mr addr:$dst, (EXTRACT_SUBREG (PEXTRWrr (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit))>;
-  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (PEXTRWrr (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
-  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (PINSRWrr (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
+  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (PINSRWrmi (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
+  def : Pat<(store f16:$src, addr:$dst), (MOV16mr addr:$dst, (EXTRACT_SUBREG (PEXTRWrri (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit))>;
+  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (PEXTRWrri (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
+  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (PINSRWrri (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
 }
 
 let Predicates = [HasAVX, NoBWI] in {
-  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWrm (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
-  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWrr (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
-  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWrr (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
+  def : Pat<(f16 (load addr:$src)), (COPY_TO_REGCLASS (VPINSRWrmi (v8i16 (IMPLICIT_DEF)), addr:$src, 0), FR16)>;
+  def : Pat<(i16 (bitconvert f16:$src)), (EXTRACT_SUBREG (VPEXTRWrri (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0), sub_16bit)>;
+  def : Pat<(f16 (bitconvert i16:$src)), (COPY_TO_REGCLASS (VPINSRWrri (v8i16 (IMPLICIT_DEF)), (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit), 0), FR16)>;
 }
 
 //===---------------------------------------------------------------------===//
@@ -5234,7 +5234,7 @@ let Predicates = [UseSSE41] in {
 
 /// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
+  def rri : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                  (ins VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5242,7 +5242,7 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
                                          timm:$src2))]>,
                   Sched<[WriteVecExtract]>;
   let hasSideEffects = 0, mayStore = 1 in
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                  (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5259,14 +5259,14 @@ defm PEXTRB      : SS41I_extract8<0x14, "pextrb">;
 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
   let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
-  def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
+  def rri_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                    (ins VR128:$src1, u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                    Sched<[WriteVecExtract]>;
 
   let hasSideEffects = 0, mayStore = 1 in
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                  (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                   "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5280,22 +5280,22 @@ let Predicates = [HasAVX, NoBWI] in
 defm PEXTRW      : SS41I_extract16<0x15, "pextrw">;
 
 let Predicates = [UseSSE41] in
-  def : Pat<(store f16:$src, addr:$dst), (PEXTRWmr addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
+  def : Pat<(store f16:$src, addr:$dst), (PEXTRWmri addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
 
 let Predicates = [HasAVX, NoBWI] in
-  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWmr addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
+  def : Pat<(store f16:$src, addr:$dst), (VPEXTRWmri addr:$dst, (v8i16 (COPY_TO_REGCLASS FR16:$src, VR128)), 0)>;
 
 
 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
+  def rri : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                  (ins VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                   "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set GR32:$dst,
                   (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
                   Sched<[WriteVecExtract]>;
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                  (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                   "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5310,14 +5310,14 @@ defm PEXTRD      : SS41I_extract32<0x16, "pextrd">;
 
 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
-  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
+  def rri : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                  (ins VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                   "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set GR64:$dst,
                   (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
                   Sched<[WriteVecExtract]>;
-  def mr : SS4AIi8<opc, MRMDestMem, (outs),
+  def mri : SS4AIi8<opc, MRMDestMem, (outs),
                  (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
                  !strconcat(OpcodeStr,
                   "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -5359,7 +5359,7 @@ let ExeDomain = SSEPackedSingle in {
 //===----------------------------------------------------------------------===//
 
 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5368,7 +5368,7 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
       [(set VR128:$dst,
         (X86pinsrb VR128:$src1, GR32orGR64:$src2, timm:$src3))]>,
       Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5382,15 +5382,15 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
 let Predicates = [HasAVX, NoBWI] in {
   defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX, VVVV, WIG;
   def : Pat<(X86pinsrb VR128:$src1, (i32 (anyext (i8 GR8:$src2))), timm:$src3),
-            (VPINSRBrr VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
-                       GR8:$src2, sub_8bit), timm:$src3)>;
+            (VPINSRBrri VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
+                        GR8:$src2, sub_8bit), timm:$src3)>;
 }
 
 let Constraints = "$src1 = $dst" in
   defm PINSRB  : SS41I_insert8<0x20, "pinsrb">;
 
 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, GR32:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5399,7 +5399,7 @@ multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
       [(set VR128:$dst,
         (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
       Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5416,7 +5416,7 @@ let Constraints = "$src1 = $dst" in
   defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
 
 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
-  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+  def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, GR64:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5425,7 +5425,7 @@ multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
       [(set VR128:$dst,
         (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
       Sched<[WriteVecInsert, ReadDefault, ReadInt2Fpu]>;
-  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+  def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
       !if(Is2Addr,
         !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),

diff  --git a/llvm/lib/Target/X86/X86ReplaceableInstrs.def b/llvm/lib/Target/X86/X86ReplaceableInstrs.def
index e9107af2acac7f..9deb7a8bdacb8f 100644
--- a/llvm/lib/Target/X86/X86ReplaceableInstrs.def
+++ b/llvm/lib/Target/X86/X86ReplaceableInstrs.def
@@ -42,8 +42,8 @@ ENTRY(UNPCKLPSrm, UNPCKLPSrm, PUNPCKLDQrm)
 ENTRY(UNPCKLPSrr, UNPCKLPSrr, PUNPCKLDQrr)
 ENTRY(UNPCKHPSrm, UNPCKHPSrm, PUNPCKHDQrm)
 ENTRY(UNPCKHPSrr, UNPCKHPSrr, PUNPCKHDQrr)
-ENTRY(EXTRACTPSmri, EXTRACTPSmri, PEXTRDmr)
-ENTRY(EXTRACTPSrri, EXTRACTPSrri, PEXTRDrr)
+ENTRY(EXTRACTPSmri, EXTRACTPSmri, PEXTRDmri)
+ENTRY(EXTRACTPSrri, EXTRACTPSrri, PEXTRDrri)
 // AVX 128-bit support
 ENTRY(VMOVAPSmr, VMOVAPDmr, VMOVDQAmr)
 ENTRY(VMOVAPSrm, VMOVAPDrm, VMOVDQArm)
@@ -74,8 +74,8 @@ ENTRY(VUNPCKLPSrm, VUNPCKLPSrm, VPUNPCKLDQrm)
 ENTRY(VUNPCKLPSrr, VUNPCKLPSrr, VPUNPCKLDQrr)
 ENTRY(VUNPCKHPSrm, VUNPCKHPSrm, VPUNPCKHDQrm)
 ENTRY(VUNPCKHPSrr, VUNPCKHPSrr, VPUNPCKHDQrr)
-ENTRY(VEXTRACTPSmri, VEXTRACTPSmri, VPEXTRDmr)
-ENTRY(VEXTRACTPSrri, VEXTRACTPSrri, VPEXTRDrr)
+ENTRY(VEXTRACTPSmri, VEXTRACTPSmri, VPEXTRDmri)
+ENTRY(VEXTRACTPSrri, VEXTRACTPSrri, VPEXTRDrri)
 // AVX 256-bit support
 ENTRY(VMOVAPSYmr, VMOVAPDYmr, VMOVDQAYmr)
 ENTRY(VMOVAPSYrm, VMOVAPDYrm, VMOVDQAYrm)
@@ -178,8 +178,8 @@ ENTRY(VUNPCKLPSZrm, VUNPCKLPSZrm, VPUNPCKLDQZrm)
 ENTRY(VUNPCKLPSZrr, VUNPCKLPSZrr, VPUNPCKLDQZrr)
 ENTRY(VUNPCKHPSZrm, VUNPCKHPSZrm, VPUNPCKHDQZrm)
 ENTRY(VUNPCKHPSZrr, VUNPCKHPSZrr, VPUNPCKHDQZrr)
-ENTRY(VEXTRACTPSZmri, VEXTRACTPSZmri, VPEXTRDZmr)
-ENTRY(VEXTRACTPSZrri, VEXTRACTPSZrri, VPEXTRDZrr)
+ENTRY(VEXTRACTPSZmri, VEXTRACTPSZmri, VPEXTRDZmri)
+ENTRY(VEXTRACTPSZrri, VEXTRACTPSZrri, VPEXTRDZrri)
 };
 
 static const uint16_t ReplaceableInstrsAVX2[][3] = {

diff  --git a/llvm/lib/Target/X86/X86SchedAlderlakeP.td b/llvm/lib/Target/X86/X86SchedAlderlakeP.td
index e0a5a4f5b49e5b..aec6906310d96b 100644
--- a/llvm/lib/Target/X86/X86SchedAlderlakeP.td
+++ b/llvm/lib/Target/X86/X86SchedAlderlakeP.td
@@ -894,7 +894,7 @@ def ADLPWriteResGroup51 : SchedWriteRes<[ADLPPort00, ADLPPort05]> {
   let NumMicroOps = 2;
 }
 def : InstRW<[ADLPWriteResGroup51], (instregex "^(V?)EXTRACTPSrri$")>;
-def : InstRW<[ADLPWriteResGroup51], (instrs MMX_PEXTRWrr)>;
+def : InstRW<[ADLPWriteResGroup51], (instrs MMX_PEXTRWrri)>;
 
 def ADLPWriteResGroup52 : SchedWriteRes<[ADLPPort00_01_05_06, ADLPPort02_03, ADLPPort02_03_07, ADLPPort04, ADLPPort06]> {
   let Latency = 7;
@@ -1367,7 +1367,7 @@ def ADLPWriteResGroup121 : SchedWriteRes<[ADLPPort05]> {
 }
 def : InstRW<[ADLPWriteResGroup121], (instregex "^MMX_PACKSS(DW|WB)rr$")>;
 def : InstRW<[ADLPWriteResGroup121], (instrs MMX_PACKUSWBrr)>;
-def : InstRW<[ADLPWriteResGroup121, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrr)>;
+def : InstRW<[ADLPWriteResGroup121, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrri)>;
 
 def ADLPWriteResGroup122 : SchedWriteRes<[ADLPPort00_05, ADLPPort02_03_11]> {
   let Latency = 9;
@@ -1394,7 +1394,7 @@ def ADLPWriteResGroup125 : SchedWriteRes<[ADLPPort02_03_11, ADLPPort05]> {
   let NumMicroOps = 2;
 }
 def : InstRW<[ADLPWriteResGroup125], (instregex "^VPBROADCAST(B|W)Yrm$")>;
-def : InstRW<[ADLPWriteResGroup125, ReadAfterLd], (instrs MMX_PINSRWrm)>;
+def : InstRW<[ADLPWriteResGroup125, ReadAfterLd], (instrs MMX_PINSRWrmi)>;
 def : InstRW<[ADLPWriteResGroup125, ReadAfterVecYLd], (instrs VPALIGNRYrmi)>;
 
 def ADLPWriteResGroup126 : SchedWriteRes<[ADLPPort00_01_05_06_10, ADLPPort02_03_11]> {
@@ -1721,7 +1721,7 @@ def ADLPWriteResGroup176 : SchedWriteRes<[ADLPPort01_05, ADLPPort04_09, ADLPPort
   let Latency = 12;
   let NumMicroOps = 3;
 }
-def : InstRW<[ADLPWriteResGroup176], (instregex "^(V?)PEXTR(D|Q)mr$")>;
+def : InstRW<[ADLPWriteResGroup176], (instregex "^(V?)PEXTR(D|Q)mri$")>;
 
 def ADLPWriteResGroup177 : SchedWriteRes<[ADLPPort00_01, ADLPPort01_05, ADLPPort02_03_11]> {
   let ReleaseAtCycles = [1, 2, 1];

diff  --git a/llvm/lib/Target/X86/X86SchedSapphireRapids.td b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
index ae282b8c287f82..1fb3c7560a5724 100644
--- a/llvm/lib/Target/X86/X86SchedSapphireRapids.td
+++ b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
@@ -1014,7 +1014,7 @@ def SPRWriteResGroup55 : SchedWriteRes<[SPRPort00, SPRPort05]> {
   let NumMicroOps = 2;
 }
 def : InstRW<[SPRWriteResGroup55], (instregex "^(V?)EXTRACTPSrri$")>;
-def : InstRW<[SPRWriteResGroup55], (instrs MMX_PEXTRWrr,
+def : InstRW<[SPRWriteResGroup55], (instrs MMX_PEXTRWrri,
                                            VEXTRACTPSZrri,
                                            VPERMWZrr)>;
 
@@ -1646,7 +1646,7 @@ def : InstRW<[SPRWriteResGroup130], (instregex "^MMX_PACKSS(DW|WB)rr$",
                                                "^VPMOV(U?)SQDZrrk(z?)$",
                                                "^VPMOVUS(Q|W)BZrr$")>;
 def : InstRW<[SPRWriteResGroup130], (instrs MMX_PACKUSWBrr)>;
-def : InstRW<[SPRWriteResGroup130, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrr)>;
+def : InstRW<[SPRWriteResGroup130, ReadDefault, ReadInt2Fpu], (instrs MMX_PINSRWrri)>;
 
 def SPRWriteResGroup131 : SchedWriteRes<[SPRPort00_05, SPRPort02_03_11]> {
   let Latency = 9;
@@ -1691,7 +1691,7 @@ def SPRWriteResGroup134 : SchedWriteRes<[SPRPort02_03_11, SPRPort05]> {
 def : InstRW<[SPRWriteResGroup134], (instregex "^VPBROADCAST(BY|WZ)rm$",
                                                "^VPBROADCAST(B|W)Z256rm$",
                                                "^VPBROADCAST(BZ|WY)rm$")>;
-def : InstRW<[SPRWriteResGroup134, ReadAfterLd], (instrs MMX_PINSRWrm)>;
+def : InstRW<[SPRWriteResGroup134, ReadAfterLd], (instrs MMX_PINSRWrmi)>;
 def : InstRW<[SPRWriteResGroup134, ReadAfterVecXLd], (instregex "^VFPCLASSP(D|S)Z128rm$")>;
 def : InstRW<[SPRWriteResGroup134, ReadAfterVecLd], (instregex "^VFPCLASSS(D|H|S)Zrm$")>;
 def : InstRW<[SPRWriteResGroup134, ReadAfterVecYLd], (instregex "^VPALIGNR(Y|Z256)rmi$")>;
@@ -2050,8 +2050,8 @@ def SPRWriteResGroup182 : SchedWriteRes<[SPRPort01_05, SPRPort04_09, SPRPort07_0
   let Latency = 12;
   let NumMicroOps = 3;
 }
-def : InstRW<[SPRWriteResGroup182], (instregex "^(V?)PEXTR(D|Q)mr$",
-                                               "^VPEXTR(D|Q)Zmr$",
+def : InstRW<[SPRWriteResGroup182], (instregex "^(V?)PEXTR(D|Q)mri$",
+                                               "^VPEXTR(D|Q)Zmri$",
                                                "^VPMOVQDZ128mr(k?)$")>;
 
 def SPRWriteResGroup183 : SchedWriteRes<[SPRPort00_01, SPRPort01_05, SPRPort02_03_11]> {

diff  --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
index f89e958bc9ad6a..2f587d789779cf 100644
--- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -2074,38 +2074,38 @@ body: |
   $xmm0 = VFNMSUB231SSZr                       $xmm0, $xmm1, $xmm2, implicit $mxcsr
   ; CHECK: $xmm0 = VFNMSUB231SSr_Int           $xmm0, $xmm1, $xmm2, implicit $mxcsr
   $xmm0 = VFNMSUB231SSZr_Int                   $xmm0, $xmm1, $xmm2, implicit $mxcsr
-  ; CHECK: VPEXTRBmr                           $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
-  VPEXTRBZmr                                   $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
-  ; CHECK: $eax = VPEXTRBrr                    $xmm0, 1
-  $eax = VPEXTRBZrr                            $xmm0, 1
-  ; CHECK: VPEXTRDmr                           $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
-  VPEXTRDZmr                                   $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
-  ; CHECK: $eax = VPEXTRDrr                    $xmm0, 1
-  $eax = VPEXTRDZrr                            $xmm0, 1
-  ; CHECK: VPEXTRQmr                           $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
-  VPEXTRQZmr                                   $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
-  ; CHECK: $rax = VPEXTRQrr                    $xmm0, 1
-  $rax = VPEXTRQZrr                            $xmm0, 1
-  ; CHECK: VPEXTRWmr                           $rdi, 1, $noreg, 0, $noreg,  $xmm0, 3
-  VPEXTRWZmr                                   $rdi, 1, $noreg, 0, $noreg,  $xmm0, 3
-  ; CHECK: $eax = VPEXTRWrr                    $xmm0, 1
-  $eax = VPEXTRWZrr                            $xmm0, 1
-  ; CHECK: $xmm0 = VPINSRBrm                   $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm0 = VPINSRBZrm                           $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm0 = VPINSRBrr                   $xmm0, $edi, 5
-  $xmm0 = VPINSRBZrr                           $xmm0, $edi, 5
-  ; CHECK: $xmm0 = VPINSRDrm                   $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm0 = VPINSRDZrm                           $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm0 = VPINSRDrr                   $xmm0, $edi, 5
-  $xmm0 = VPINSRDZrr                           $xmm0, $edi, 5
-  ; CHECK: $xmm0 = VPINSRQrm                   $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm0 = VPINSRQZrm                           $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm0 = VPINSRQrr                   $xmm0, $rdi, 5
-  $xmm0 = VPINSRQZrr                           $xmm0, $rdi, 5
-  ; CHECK: $xmm0 = VPINSRWrm                   $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm0 = VPINSRWZrm                           $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm0 = VPINSRWrr                   $xmm0, $edi, 5
-  $xmm0 = VPINSRWZrr                           $xmm0, $edi, 5
+  ; CHECK: VPEXTRBmri                          $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+  VPEXTRBZmri                                  $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+  ; CHECK: $eax = VPEXTRBrri                   $xmm0, 1
+  $eax = VPEXTRBZrri                           $xmm0, 1
+  ; CHECK: VPEXTRDmri                          $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+  VPEXTRDZmri                                  $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+  ; CHECK: $eax = VPEXTRDrri                   $xmm0, 1
+  $eax = VPEXTRDZrri                           $xmm0, 1
+  ; CHECK: VPEXTRQmri                          $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+  VPEXTRQZmri                                  $rdi, 1, $noreg, 0, $noreg, $xmm0, 3
+  ; CHECK: $rax = VPEXTRQrri                   $xmm0, 1
+  $rax = VPEXTRQZrri                           $xmm0, 1
+  ; CHECK: VPEXTRWmri                          $rdi, 1, $noreg, 0, $noreg,  $xmm0, 3
+  VPEXTRWZmri                                  $rdi, 1, $noreg, 0, $noreg,  $xmm0, 3
+  ; CHECK: $eax = VPEXTRWrri                   $xmm0, 1
+  $eax = VPEXTRWZrri                           $xmm0, 1
+  ; CHECK: $xmm0 = VPINSRBrmi                  $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm0 = VPINSRBZrmi                          $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm0 = VPINSRBrri                  $xmm0, $edi, 5
+  $xmm0 = VPINSRBZrri                          $xmm0, $edi, 5
+  ; CHECK: $xmm0 = VPINSRDrmi                  $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm0 = VPINSRDZrmi                          $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm0 = VPINSRDrri                  $xmm0, $edi, 5
+  $xmm0 = VPINSRDZrri                          $xmm0, $edi, 5
+  ; CHECK: $xmm0 = VPINSRQrmi                  $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm0 = VPINSRQZrmi                          $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm0 = VPINSRQrri                  $xmm0, $rdi, 5
+  $xmm0 = VPINSRQZrri                          $xmm0, $rdi, 5
+  ; CHECK: $xmm0 = VPINSRWrmi                  $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm0 = VPINSRWZrmi                          $xmm0, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm0 = VPINSRWrri                  $xmm0, $edi, 5
+  $xmm0 = VPINSRWZrri                          $xmm0, $edi, 5
   ; CHECK: $xmm0 = VSQRTSDm                    $xmm0, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr
   $xmm0 = VSQRTSDZm                            $xmm0, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr
   ; CHECK: $xmm0 = VSQRTSDm_Int                $xmm0, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr
@@ -4406,38 +4406,38 @@ body: |
   $xmm16 = VFNMSUB231SSZr                      $xmm16, $xmm1, $xmm2, implicit $mxcsr
   ; CHECK: $xmm16 = VFNMSUB231SSZr_Int         $xmm16, $xmm1, $xmm2, implicit $mxcsr
   $xmm16 = VFNMSUB231SSZr_Int                  $xmm16, $xmm1, $xmm2, implicit $mxcsr
-  ; CHECK: VPEXTRBZmr                          $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
-  VPEXTRBZmr                                   $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
-  ; CHECK: $eax = VPEXTRBZrr                   $xmm16, 1
-  $eax = VPEXTRBZrr                            $xmm16, 1
-  ; CHECK: VPEXTRDZmr                          $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
-  VPEXTRDZmr                                   $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
-  ; CHECK: $eax = VPEXTRDZrr                   $xmm16, 1
-  $eax = VPEXTRDZrr                            $xmm16, 1
-  ; CHECK: VPEXTRQZmr                          $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
-  VPEXTRQZmr                                   $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
-  ; CHECK: $rax = VPEXTRQZrr                   $xmm16, 1
-  $rax = VPEXTRQZrr                            $xmm16, 1
-  ; CHECK: VPEXTRWZmr                          $rdi, 1, $noreg, 0, $noreg,  $xmm16, 3
-  VPEXTRWZmr                                   $rdi, 1, $noreg, 0, $noreg,  $xmm16, 3
-  ; CHECK: $eax = VPEXTRWZrr                   $xmm16, 1
-  $eax = VPEXTRWZrr                            $xmm16, 1
-  ; CHECK: $xmm16 = VPINSRBZrm                 $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm16 = VPINSRBZrm                          $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm16 = VPINSRBZrr                 $xmm16, $edi, 5
-  $xmm16 = VPINSRBZrr                          $xmm16, $edi, 5
-  ; CHECK: $xmm16 = VPINSRDZrm                 $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm16 = VPINSRDZrm                          $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm16 = VPINSRDZrr                 $xmm16, $edi, 5
-  $xmm16 = VPINSRDZrr                          $xmm16, $edi, 5
-  ; CHECK: $xmm16 = VPINSRQZrm                 $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm16 = VPINSRQZrm                          $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm16 = VPINSRQZrr                 $xmm16, $rdi, 5
-  $xmm16 = VPINSRQZrr                          $xmm16, $rdi, 5
-  ; CHECK: $xmm16 = VPINSRWZrm                 $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  $xmm16 = VPINSRWZrm                          $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
-  ; CHECK: $xmm16 = VPINSRWZrr                 $xmm16, $edi, 5
-  $xmm16 = VPINSRWZrr                          $xmm16, $edi, 5
+  ; CHECK: VPEXTRBZmri                         $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+  VPEXTRBZmri                                  $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+  ; CHECK: $eax = VPEXTRBZrri                  $xmm16, 1
+  $eax = VPEXTRBZrri                           $xmm16, 1
+  ; CHECK: VPEXTRDZmri                         $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+  VPEXTRDZmri                                  $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+  ; CHECK: $eax = VPEXTRDZrri                  $xmm16, 1
+  $eax = VPEXTRDZrri                           $xmm16, 1
+  ; CHECK: VPEXTRQZmri                         $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+  VPEXTRQZmri                                  $rdi, 1, $noreg, 0, $noreg, $xmm16, 3
+  ; CHECK: $rax = VPEXTRQZrri                  $xmm16, 1
+  $rax = VPEXTRQZrri                           $xmm16, 1
+  ; CHECK: VPEXTRWZmri                         $rdi, 1, $noreg, 0, $noreg,  $xmm16, 3
+  VPEXTRWZmri                                  $rdi, 1, $noreg, 0, $noreg,  $xmm16, 3
+  ; CHECK: $eax = VPEXTRWZrri                  $xmm16, 1
+  $eax = VPEXTRWZrri                           $xmm16, 1
+  ; CHECK: $xmm16 = VPINSRBZrmi                $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm16 = VPINSRBZrmi                         $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm16 = VPINSRBZrri                $xmm16, $edi, 5
+  $xmm16 = VPINSRBZrri                         $xmm16, $edi, 5
+  ; CHECK: $xmm16 = VPINSRDZrmi                $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm16 = VPINSRDZrmi                         $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm16 = VPINSRDZrri                $xmm16, $edi, 5
+  $xmm16 = VPINSRDZrri                         $xmm16, $edi, 5
+  ; CHECK: $xmm16 = VPINSRQZrmi                $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm16 = VPINSRQZrmi                         $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm16 = VPINSRQZrri                $xmm16, $rdi, 5
+  $xmm16 = VPINSRQZrri                         $xmm16, $rdi, 5
+  ; CHECK: $xmm16 = VPINSRWZrmi                $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  $xmm16 = VPINSRWZrmi                         $xmm16, $rsi, 1, $noreg, 0, $noreg, 3
+  ; CHECK: $xmm16 = VPINSRWZrri                $xmm16, $edi, 5
+  $xmm16 = VPINSRWZrri                         $xmm16, $edi, 5
   ; CHECK: $xmm16 = VSQRTSDZm                  $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr
   $xmm16 = VSQRTSDZm                           $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr
   ; CHECK: $xmm16 = VSQRTSDZm_Int              $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit $mxcsr

diff  --git a/llvm/test/CodeGen/X86/opt_phis2.mir b/llvm/test/CodeGen/X86/opt_phis2.mir
index d528bc7d4e569e..f688e83fd333f2 100644
--- a/llvm/test/CodeGen/X86/opt_phis2.mir
+++ b/llvm/test/CodeGen/X86/opt_phis2.mir
@@ -50,12 +50,12 @@ body:             |
   bb.4:
     %3:vr256 = COPY %8
     %17:vr128 = VEXTRACTF128rri %8, 1
-    VPEXTRDmr %9, 1, $noreg, 12, $noreg, killed %17, 2
+    VPEXTRDmri %9, 1, $noreg, 12, $noreg, killed %17, 2
 
   bb.5:
     %4:vr256 = PHI %0, %bb.1, %3, %bb.4
     %18:vr128 = VEXTRACTF128rri %4, 1
-    VPEXTRDmr %9, 1, $noreg, 8, $noreg, killed %18, 1
+    VPEXTRDmri %9, 1, $noreg, 8, $noreg, killed %18, 1
 
   bb.6:
     %5:vr256 = PHI %1, %bb.2, %4, %bb.5
@@ -65,7 +65,7 @@ body:             |
   bb.7:
     %6:vr256 = PHI %2, %bb.3, %5, %bb.6
     %20:vr128 = COPY %6.sub_xmm
-    VPEXTRDmr %9, 1, $noreg, 0, $noreg, killed %20, 3
+    VPEXTRDmri %9, 1, $noreg, 0, $noreg, killed %20, 3
 
   bb.8:
     RET 0

diff  --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 65e6ac59e05504..e85708ac1cc458 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -461,8 +461,8 @@ static const X86FoldTableEntry Table0[] = {
   {X86::MUL64r_NF, X86::MUL64m_NF, TB_FOLDED_LOAD},
   {X86::MUL8r, X86::MUL8m, TB_FOLDED_LOAD},
   {X86::MUL8r_NF, X86::MUL8m_NF, TB_FOLDED_LOAD},
-  {X86::PEXTRDrr, X86::PEXTRDmr, TB_FOLDED_STORE},
-  {X86::PEXTRQrr, X86::PEXTRQmr, TB_FOLDED_STORE},
+  {X86::PEXTRDrri, X86::PEXTRDmri, TB_FOLDED_STORE},
+  {X86::PEXTRQrri, X86::PEXTRQmri, TB_FOLDED_STORE},
   {X86::PTWRITE64r, X86::PTWRITE64m, TB_FOLDED_LOAD},
   {X86::PTWRITEr, X86::PTWRITEm, TB_FOLDED_LOAD},
   {X86::PUSH16r, X86::PUSH16rmm, TB_FOLDED_LOAD},
@@ -556,10 +556,10 @@ static const X86FoldTableEntry Table0[] = {
   {X86::VMOVUPSZ256rr, X86::VMOVUPSZ256mr, TB_FOLDED_STORE|TB_NO_REVERSE},
   {X86::VMOVUPSZrr, X86::VMOVUPSZmr, TB_FOLDED_STORE|TB_NO_REVERSE},
   {X86::VMOVUPSrr, X86::VMOVUPSmr, TB_FOLDED_STORE|TB_NO_REVERSE},
-  {X86::VPEXTRDZrr, X86::VPEXTRDZmr, TB_FOLDED_STORE},
-  {X86::VPEXTRDrr, X86::VPEXTRDmr, TB_FOLDED_STORE},
-  {X86::VPEXTRQZrr, X86::VPEXTRQZmr, TB_FOLDED_STORE},
-  {X86::VPEXTRQrr, X86::VPEXTRQmr, TB_FOLDED_STORE},
+  {X86::VPEXTRDZrri, X86::VPEXTRDZmri, TB_FOLDED_STORE},
+  {X86::VPEXTRDrri, X86::VPEXTRDmri, TB_FOLDED_STORE},
+  {X86::VPEXTRQZrri, X86::VPEXTRQZmri, TB_FOLDED_STORE},
+  {X86::VPEXTRQrri, X86::VPEXTRQmri, TB_FOLDED_STORE},
   {X86::VPMOVDBZrr, X86::VPMOVDBZmr, TB_FOLDED_STORE},
   {X86::VPMOVDWZ256rr, X86::VPMOVDWZ256mr, TB_FOLDED_STORE},
   {X86::VPMOVDWZrr, X86::VPMOVDWZmr, TB_FOLDED_STORE},
@@ -2165,7 +2165,7 @@ static const X86FoldTableEntry Table2[] = {
   {X86::MMX_PHSUBDrr, X86::MMX_PHSUBDrm, 0},
   {X86::MMX_PHSUBSWrr, X86::MMX_PHSUBSWrm, 0},
   {X86::MMX_PHSUBWrr, X86::MMX_PHSUBWrm, 0},
-  {X86::MMX_PINSRWrr, X86::MMX_PINSRWrm, TB_NO_REVERSE},
+  {X86::MMX_PINSRWrri, X86::MMX_PINSRWrmi, TB_NO_REVERSE},
   {X86::MMX_PMADDUBSWrr, X86::MMX_PMADDUBSWrm, 0},
   {X86::MMX_PMADDWDrr, X86::MMX_PMADDWDrm, 0},
   {X86::MMX_PMAXSWrr, X86::MMX_PMAXSWrm, 0},
@@ -2295,10 +2295,10 @@ static const X86FoldTableEntry Table2[] = {
   {X86::PHSUBDrr, X86::PHSUBDrm, TB_ALIGN_16},
   {X86::PHSUBSWrr, X86::PHSUBSWrm, TB_ALIGN_16},
   {X86::PHSUBWrr, X86::PHSUBWrm, TB_ALIGN_16},
-  {X86::PINSRBrr, X86::PINSRBrm, TB_NO_REVERSE},
-  {X86::PINSRDrr, X86::PINSRDrm, 0},
-  {X86::PINSRQrr, X86::PINSRQrm, 0},
-  {X86::PINSRWrr, X86::PINSRWrm, TB_NO_REVERSE},
+  {X86::PINSRBrri, X86::PINSRBrmi, TB_NO_REVERSE},
+  {X86::PINSRDrri, X86::PINSRDrmi, 0},
+  {X86::PINSRQrri, X86::PINSRQrmi, 0},
+  {X86::PINSRWrri, X86::PINSRWrmi, TB_NO_REVERSE},
   {X86::PMADDUBSWrr, X86::PMADDUBSWrm, TB_ALIGN_16},
   {X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16},
   {X86::PMAXSBrr, X86::PMAXSBrm, TB_ALIGN_16},
@@ -3477,14 +3477,14 @@ static const X86FoldTableEntry Table2[] = {
   {X86::VPHSUBSWrr, X86::VPHSUBSWrm, 0},
   {X86::VPHSUBWYrr, X86::VPHSUBWYrm, 0},
   {X86::VPHSUBWrr, X86::VPHSUBWrm, 0},
-  {X86::VPINSRBZrr, X86::VPINSRBZrm, TB_NO_REVERSE},
-  {X86::VPINSRBrr, X86::VPINSRBrm, TB_NO_REVERSE},
-  {X86::VPINSRDZrr, X86::VPINSRDZrm, 0},
-  {X86::VPINSRDrr, X86::VPINSRDrm, 0},
-  {X86::VPINSRQZrr, X86::VPINSRQZrm, 0},
-  {X86::VPINSRQrr, X86::VPINSRQrm, 0},
-  {X86::VPINSRWZrr, X86::VPINSRWZrm, TB_NO_REVERSE},
-  {X86::VPINSRWrr, X86::VPINSRWrm, TB_NO_REVERSE},
+  {X86::VPINSRBZrri, X86::VPINSRBZrmi, TB_NO_REVERSE},
+  {X86::VPINSRBrri, X86::VPINSRBrmi, TB_NO_REVERSE},
+  {X86::VPINSRDZrri, X86::VPINSRDZrmi, 0},
+  {X86::VPINSRDrri, X86::VPINSRDrmi, 0},
+  {X86::VPINSRQZrri, X86::VPINSRQZrmi, 0},
+  {X86::VPINSRQrri, X86::VPINSRQrmi, 0},
+  {X86::VPINSRWZrri, X86::VPINSRWZrmi, TB_NO_REVERSE},
+  {X86::VPINSRWrri, X86::VPINSRWrmi, TB_NO_REVERSE},
   {X86::VPLZCNTDZ128rrkz, X86::VPLZCNTDZ128rmkz, 0},
   {X86::VPLZCNTDZ256rrkz, X86::VPLZCNTDZ256rmkz, 0},
   {X86::VPLZCNTDZrrkz, X86::VPLZCNTDZrmkz, 0},


        


More information about the llvm-commits mailing list