[llvm] b80ae65 - [X86][mem-fold] Remove definition of NotMemoryFoldable and move code into a def file, NFCI

Shengchen Kan via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 5 06:28:54 PDT 2023


Author: Shengchen Kan
Date: 2023-04-05T21:28:31+08:00
New Revision: b80ae6548cd3431358d4a59a3ee4ed212f16164b

URL: https://github.com/llvm/llvm-project/commit/b80ae6548cd3431358d4a59a3ee4ed212f16164b
DIFF: https://github.com/llvm/llvm-project/commit/b80ae6548cd3431358d4a59a3ee4ed212f16164b.diff

LOG: [X86][mem-fold] Remove definition of NotMemoryFoldable and move code into a def file, NFCI

The goal is to centralize the memory-folding logic.
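
The centralization relies on the X-macro pattern: X86ManualFoldTables.def
becomes a flat list of NOFOLD(...) and ENTRY(...) invocations, and each
consumer defines whichever macro it needs before including the file, while
the guarded defaults expand the other macro to nothing. A minimal
standalone sketch of the same pattern, assuming a hypothetical "nofold.def"
and illustrative instruction names (neither is part of this commit):

  // nofold.def - one invocation per instruction that must not be folded
  #ifndef NOFOLD
  #define NOFOLD(INSN)
  #endif
  NOFOLD(BT16rr)
  NOFOLD(XCHG8rr)
  #undef NOFOLD

  // consumer.cpp - expand the list into a lookup set at namespace scope
  #include <set>
  #include <string>

  const std::set<std::string> NoFoldSet = {
  #define NOFOLD(INSN) #INSN,   // stringify each name; trailing comma is OK
  #include "nofold.def"
  };

  bool isNoFold(const std::string &Name) {
    return NoFoldSet.count(Name) != 0;
  }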

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86InstrAVX512.td
    llvm/lib/Target/X86/X86InstrControl.td
    llvm/lib/Target/X86/X86InstrExtension.td
    llvm/lib/Target/X86/X86InstrFormats.td
    llvm/lib/Target/X86/X86InstrKL.td
    llvm/lib/Target/X86/X86InstrMMX.td
    llvm/lib/Target/X86/X86InstrMisc.td
    llvm/lib/Target/X86/X86InstrSSE.td
    llvm/lib/Target/X86/X86InstrSystem.td
    llvm/lib/Target/X86/X86InstrVMX.td
    llvm/utils/TableGen/X86FoldTablesEmitter.cpp
    llvm/utils/TableGen/X86ManualFoldTables.def

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 8468e16e8641..7ac7ef3a2a0d 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -883,7 +883,7 @@ multiclass vextract_for_size_split<int Opcode,
                      "vextract" # To.EltTypeName # "x" # To.NumElts #
                           "\t{$idx, $src1, $dst {${mask}}|"
                           "$dst {${mask}}, $src1, $idx}", []>,
-                    EVEX_K, EVEX, Sched<[SchedMR]>, NotMemoryFoldable;
+                    EVEX_K, EVEX, Sched<[SchedMR]>;
   }
 }
 
@@ -1998,7 +1998,7 @@ multiclass WriteFVarBlendask<bits<8> opc, string OpcodeStr,
              (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
              !strconcat(OpcodeStr,
              "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
-             []>, EVEX_4V, EVEX_KZ, Sched<[sched]>, NotMemoryFoldable;
+             []>, EVEX_4V, EVEX_KZ, Sched<[sched]>;
   let mayLoad = 1 in {
   def rm  : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
              (ins _.RC:$src1, _.MemOp:$src2),
@@ -2017,7 +2017,7 @@ multiclass WriteFVarBlendask<bits<8> opc, string OpcodeStr,
              !strconcat(OpcodeStr,
              "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
              []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>,
-             Sched<[sched.Folded, sched.ReadAfterFold]>, NotMemoryFoldable;
+             Sched<[sched.Folded, sched.ReadAfterFold]>;
   }
   }
 }
@@ -2038,7 +2038,7 @@ multiclass WriteFVarBlendask_rmb<bits<8> opc, string OpcodeStr,
             "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}} {z}|",
             "$dst {${mask}} {z}, $src1, ${src2}", _.BroadcastStr, "}"), []>,
       EVEX_4V, EVEX_KZ, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
-      Sched<[sched.Folded, sched.ReadAfterFold]>, NotMemoryFoldable;
+      Sched<[sched.Folded, sched.ReadAfterFold]>;
 
   def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
       (ins _.RC:$src1, _.ScalarMemOp:$src2),
@@ -3561,8 +3561,7 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, string BaseName,
   def mrk : AVX512PI<opc, MRMDestMem, (outs),
                      (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
               OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}",
-               [], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.MR]>,
-               NotMemoryFoldable;
+               [], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.MR]>;
 
   def: Pat<(mstore (_.VT _.RC:$src), addr:$ptr, _.KRCWM:$mask),
            (!cast<Instruction>(BaseName#_.ZSuffix#mrk) addr:$ptr,
@@ -4198,8 +4197,7 @@ multiclass avx512_move_scalar<string asm, SDNode OpNode, PatFrag vzload_frag,
   def mrk: AVX512PI<0x11, MRMDestMem, (outs),
               (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.RC:$src),
               !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
-              [], _.ExeDomain>, EVEX, EVEX_K, Sched<[WriteFStore]>,
-              NotMemoryFoldable;
+              [], _.ExeDomain>, EVEX, EVEX_K, Sched<[WriteFStore]>;
   }
 }
 
@@ -6734,7 +6732,7 @@ def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
           (ins VR128X:$src1, VR128X:$src2),
           "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
           [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))]>,
-          Sched<[SchedWriteFShuffle.XMM]>, EVEX_4V, NotMemoryFoldable;
+          Sched<[SchedWriteFShuffle.XMM]>, EVEX_4V;
 
 //===----------------------------------------------------------------------===//
 // VMOVHPS/PD VMOVLPS Instructions
@@ -9232,7 +9230,7 @@ let ExeDomain = GenericDomain, Uses = [MXCSR], mayRaiseFPException = 1 in {
     def mrk : AVX512AIi8<0x1D, MRMDestMem, (outs),
                (ins x86memop:$dst, _dest.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
                "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}", []>,
-                EVEX_K, Sched<[MR]>, NotMemoryFoldable;
+                EVEX_K, Sched<[MR]>;
   }
 }
 }
@@ -9915,7 +9913,7 @@ multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
     def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
                (ins x86memop:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src),
                OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}", []>,
-               EVEX, EVEX_K, Sched<[sched.Folded]>, NotMemoryFoldable;
+               EVEX, EVEX_K, Sched<[sched.Folded]>;
   }//mayStore = 1, hasSideEffects = 0
 }
 
@@ -10748,13 +10746,13 @@ multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
 
 // FIXME: Is there a better scheduler class for VPCOMPRESS?
 defm VPCOMPRESSD : compress_by_elt_width <0x8B, "vpcompressd", WriteVarShuffle256,
-                                          avx512vl_i32_info>, EVEX, NotMemoryFoldable;
+                                          avx512vl_i32_info>, EVEX;
 defm VPCOMPRESSQ : compress_by_elt_width <0x8B, "vpcompressq", WriteVarShuffle256,
-                                          avx512vl_i64_info>, EVEX, REX_W, NotMemoryFoldable;
+                                          avx512vl_i64_info>, EVEX, REX_W;
 defm VCOMPRESSPS : compress_by_elt_width <0x8A, "vcompressps", WriteVarShuffle256,
-                                          avx512vl_f32_info>, EVEX, NotMemoryFoldable;
+                                          avx512vl_f32_info>, EVEX;
 defm VCOMPRESSPD : compress_by_elt_width <0x8A, "vcompresspd", WriteVarShuffle256,
-                                          avx512vl_f64_info>, EVEX, REX_W, NotMemoryFoldable;
+                                          avx512vl_f64_info>, EVEX, REX_W;
 
 // expand
 multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
@@ -12560,11 +12558,9 @@ defm VPSHRD  : VBMI2_shift_imm<0x72, 0x73, "vpshrd", X86VShrd, SchedWriteVecIMul
 
 // Compress
 defm VPCOMPRESSB : compress_by_elt_width<0x63, "vpcompressb", WriteVarShuffle256,
-                                         avx512vl_i8_info, HasVBMI2>, EVEX,
-                                         NotMemoryFoldable;
+                                         avx512vl_i8_info, HasVBMI2>, EVEX;
 defm VPCOMPRESSW : compress_by_elt_width <0x63, "vpcompressw", WriteVarShuffle256,
-                                          avx512vl_i16_info, HasVBMI2>, EVEX, REX_W,
-                                          NotMemoryFoldable;
+                                          avx512vl_i16_info, HasVBMI2>, EVEX, REX_W;
 // Expand
 defm VPEXPANDB : expand_by_elt_width <0x62, "vpexpandb", WriteVarShuffle256,
                                       avx512vl_i8_info, HasVBMI2>, EVEX;

diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
index 8687990075e4..2a0801a6519c 100644
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -273,9 +273,9 @@ let isCall = 1 in
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
     isCodeGenOnly = 1, Uses = [ESP, SSP] in {
   def TCRETURNdi : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$offset),
-                           []>, Sched<[WriteJump]>, NotMemoryFoldable;
+                           []>, Sched<[WriteJump]>;
   def TCRETURNri : PseudoI<(outs), (ins ptr_rc_tailcall:$dst, i32imm:$offset),
-                           []>, Sched<[WriteJump]>, NotMemoryFoldable;
+                           []>, Sched<[WriteJump]>;
   let mayLoad = 1 in
   def TCRETURNmi : PseudoI<(outs), (ins i32mem_TC:$dst, i32imm:$offset),
                            []>, Sched<[WriteJumpLd]>;
@@ -350,11 +350,11 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
                                []>, Sched<[WriteJump]>;
   def TCRETURNri64   : PseudoI<(outs),
                                (ins ptr_rc_tailcall:$dst, i32imm:$offset),
-                               []>, Sched<[WriteJump]>, NotMemoryFoldable;
+                               []>, Sched<[WriteJump]>;
   let mayLoad = 1 in
   def TCRETURNmi64   : PseudoI<(outs),
                                (ins i64mem_TC:$dst, i32imm:$offset),
-                               []>, Sched<[WriteJumpLd]>, NotMemoryFoldable;
+                               []>, Sched<[WriteJumpLd]>;
 
   def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_brtarget:$dst),
                            []>, Sched<[WriteJump]>;

diff --git a/llvm/lib/Target/X86/X86InstrExtension.td b/llvm/lib/Target/X86/X86InstrExtension.td
index 8d3fce7f55bc..46554dfc5167 100644
--- a/llvm/lib/Target/X86/X86InstrExtension.td
+++ b/llvm/lib/Target/X86/X86InstrExtension.td
@@ -93,17 +93,17 @@ def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
 def MOVSX16rr16: I<0xBF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                    "movs{ww|x}\t{$src, $dst|$dst, $src}",
-                   []>, TB, OpSize16, Sched<[WriteALU]>, NotMemoryFoldable;
+                   []>, TB, OpSize16, Sched<[WriteALU]>;
 def MOVZX16rr16: I<0xB7, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                    "movz{ww|x}\t{$src, $dst|$dst, $src}",
-                   []>, TB, OpSize16, Sched<[WriteALU]>, NotMemoryFoldable;
+                   []>, TB, OpSize16, Sched<[WriteALU]>;
 let mayLoad = 1 in {
 def MOVSX16rm16: I<0xBF, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                    "movs{ww|x}\t{$src, $dst|$dst, $src}",
-                   []>, OpSize16, TB, Sched<[WriteLoad]>, NotMemoryFoldable;
+                   []>, OpSize16, TB, Sched<[WriteLoad]>;
 def MOVZX16rm16: I<0xB7, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                    "movz{ww|x}\t{$src, $dst|$dst, $src}",
-                   []>, TB, OpSize16, Sched<[WriteLoad]>, NotMemoryFoldable;
+                   []>, TB, OpSize16, Sched<[WriteLoad]>;
 } // mayLoad = 1
 } // isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0
 

diff --git a/llvm/lib/Target/X86/X86InstrFormats.td b/llvm/lib/Target/X86/X86InstrFormats.td
index dffb25ad6509..6d278d6f838d 100644
--- a/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/llvm/lib/Target/X86/X86InstrFormats.td
@@ -270,9 +270,6 @@ class EVEX2VEXOverride<string VEXInstrName> {
   string EVEX2VEXOverride = VEXInstrName;
 }
 
-// Mark the instruction as "illegal to memory fold/unfold"
-class NotMemoryFoldable { bit isMemoryFoldable = 0; }
-
 // Prevent EVEX->VEX conversion from considering this instruction.
 class NotEVEX2VEXConvertible { bit notEVEX2VEXConvertible = 1; }
 
@@ -362,7 +359,6 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
   // Used to prevent an explicit EVEX2VEX override for this instruction.
   string EVEX2VEXOverride = ?;
 
-  bit isMemoryFoldable = 1;     // Is it allowed to memory fold/unfold this instruction?
   bit notEVEX2VEXConvertible = 0; // Prevent EVEX->VEX conversion.
   bit ExplicitVEXPrefix = 0; // Force the instruction to use VEX encoding.
   // Force to check predicate before compress EVEX to VEX encoding.

diff --git a/llvm/lib/Target/X86/X86InstrKL.td b/llvm/lib/Target/X86/X86InstrKL.td
index a716aab4260b..a3392b691c0a 100644
--- a/llvm/lib/Target/X86/X86InstrKL.td
+++ b/llvm/lib/Target/X86/X86InstrKL.td
@@ -19,20 +19,17 @@ let SchedRW = [WriteSystem], Predicates = [HasKL] in {
   let Uses = [XMM0, EAX], Defs = [EFLAGS] in {
     def LOADIWKEY : I<0xDC, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "loadiwkey\t{$src2, $src1|$src1, $src2}",
-                      [(int_x86_loadiwkey XMM0, VR128:$src1, VR128:$src2, EAX)]>, T8XS,
-                      NotMemoryFoldable;
+                      [(int_x86_loadiwkey XMM0, VR128:$src1, VR128:$src2, EAX)]>, T8XS;
   }
 
   let Uses = [XMM0], Defs = [XMM0, XMM1, XMM2, XMM4, XMM5, XMM6, EFLAGS] in {
     def ENCODEKEY128 : I<0xFA, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
-                         "encodekey128\t{$src, $dst|$dst, $src}", []>, T8XS,
-                       NotMemoryFoldable;
+                         "encodekey128\t{$src, $dst|$dst, $src}", []>, T8XS;
   }
 
   let Uses = [XMM0, XMM1], Defs = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, EFLAGS] in {
     def ENCODEKEY256 : I<0xFB, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
-                         "encodekey256\t{$src, $dst|$dst, $src}", []>, T8XS,
-                       NotMemoryFoldable;
+                         "encodekey256\t{$src, $dst|$dst, $src}", []>, T8XS;
   }
 
   let Constraints = "$src1 = $dst",
@@ -40,26 +37,22 @@ let SchedRW = [WriteSystem], Predicates = [HasKL] in {
    def AESENC128KL : I<0xDC, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
                        "aesenc128kl\t{$src2, $src1|$src1, $src2}",
                        [(set VR128:$dst, EFLAGS,
-                         (X86aesenc128kl VR128:$src1, addr:$src2))]>, T8XS,
-                       NotMemoryFoldable;
+                         (X86aesenc128kl VR128:$src1, addr:$src2))]>, T8XS;
 
    def AESDEC128KL : I<0xDD, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
                        "aesdec128kl\t{$src2, $src1|$src1, $src2}",
                        [(set VR128:$dst, EFLAGS,
-                         (X86aesdec128kl VR128:$src1, addr:$src2))]>, T8XS,
-                       NotMemoryFoldable;
+                         (X86aesdec128kl VR128:$src1, addr:$src2))]>, T8XS;
 
    def AESENC256KL : I<0xDE, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
                        "aesenc256kl\t{$src2, $src1|$src1, $src2}",
                        [(set VR128:$dst, EFLAGS,
-                         (X86aesenc256kl VR128:$src1, addr:$src2))]>, T8XS,
-                       NotMemoryFoldable;
+                         (X86aesenc256kl VR128:$src1, addr:$src2))]>, T8XS;
 
    def AESDEC256KL : I<0xDF, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
                        "aesdec256kl\t{$src2, $src1|$src1, $src2}",
                        [(set VR128:$dst, EFLAGS,
-                         (X86aesdec256kl VR128:$src1, addr:$src2))]>, T8XS,
-                       NotMemoryFoldable;
+                         (X86aesdec256kl VR128:$src1, addr:$src2))]>, T8XS;
   }
 
 } // SchedRW, Predicates
@@ -69,17 +62,13 @@ let SchedRW = [WriteSystem], Predicates = [HasWIDEKL] in {
       Defs = [EFLAGS, XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7],
       mayLoad = 1 in {
     def AESENCWIDE128KL : I<0xD8, MRM0m, (outs), (ins opaquemem:$src),
-                            "aesencwide128kl\t$src", []>, T8XS,
-                            NotMemoryFoldable;
+                            "aesencwide128kl\t$src", []>, T8XS;
     def AESDECWIDE128KL : I<0xD8, MRM1m, (outs), (ins opaquemem:$src),
-                            "aesdecwide128kl\t$src", []>, T8XS,
-                            NotMemoryFoldable;
+                            "aesdecwide128kl\t$src", []>, T8XS;
     def AESENCWIDE256KL : I<0xD8, MRM2m, (outs), (ins opaquemem:$src),
-                            "aesencwide256kl\t$src", []>, T8XS,
-                            NotMemoryFoldable;
+                            "aesencwide256kl\t$src", []>, T8XS;
     def AESDECWIDE256KL : I<0xD8, MRM3m, (outs), (ins opaquemem:$src),
-                            "aesdecwide256kl\t$src", []>, T8XS,
-                            NotMemoryFoldable;
+                            "aesdecwide256kl\t$src", []>, T8XS;
   }
 
 } // SchedRW, Predicates

diff --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td
index 954ebe8a53a2..55ed2a3ab3b5 100644
--- a/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/llvm/lib/Target/X86/X86InstrMMX.td
@@ -211,7 +211,7 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
 def MMX_MOVD64from64mr : MMXRI<0x7E, MRMDestMem,
                                (outs), (ins i64mem:$dst, VR64:$src),
                                "movq\t{$src, $dst|$dst, $src}", []>,
-                               Sched<[SchedWriteVecMoveLS.MMX.MR]>, NotMemoryFoldable;
+                               Sched<[SchedWriteVecMoveLS.MMX.MR]>;
 
 let SchedRW = [SchedWriteVecMoveLS.MMX.RM] in {
 let canFoldAsLoad = 1 in

diff --git a/llvm/lib/Target/X86/X86InstrMisc.td b/llvm/lib/Target/X86/X86InstrMisc.td
index 84cc8aa6a7bd..c4baefd1ad53 100644
--- a/llvm/lib/Target/X86/X86InstrMisc.td
+++ b/llvm/lib/Target/X86/X86InstrMisc.td
@@ -18,20 +18,18 @@
 let hasSideEffects = 0, SchedRW = [WriteNop] in {
   def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
   def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
-                "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
+                "nop{w}\t$zero", []>, TB, OpSize16;
   def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
-                "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
+                "nop{l}\t$zero", []>, TB, OpSize32;
   def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
-                "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
-                Requires<[In64BitMode]>;
+                "nop{q}\t$zero", []>, TB, Requires<[In64BitMode]>;
   // Also allow register so we can assemble/disassemble
   def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
-                 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
+                 "nop{w}\t$zero", []>, TB, OpSize16;
   def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
-                 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
+                 "nop{l}\t$zero", []>, TB, OpSize32;
   def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
-                  "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
-                  Requires<[In64BitMode]>;
+                  "nop{q}\t$zero", []>, TB, Requires<[In64BitMode]>;
 }
 
 
@@ -67,9 +65,9 @@ def POP32r  : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
 // Long form for the disassembler.
 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
-                OpSize16, NotMemoryFoldable;
+                OpSize16;
 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
-                OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
+                OpSize32, Requires<[Not64BitMode]>;
 } // isCodeGenOnly = 1, ForceDisassemble = 1
 } // mayLoad, SchedRW
 let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
@@ -87,9 +85,9 @@ def PUSH32r  : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
 // Long form for the disassembler.
 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
-                 OpSize16, NotMemoryFoldable;
+                 OpSize16;
 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
-                 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
+                 OpSize32, Requires<[Not64BitMode]>;
 } // isCodeGenOnly = 1, ForceDisassemble = 1
 
 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
@@ -161,7 +159,7 @@ def POP64r   : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
 // Long form for the disassembler.
 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
-                OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
+                OpSize32, Requires<[In64BitMode]>;
 } // isCodeGenOnly = 1, ForceDisassemble = 1
 } // mayLoad, SchedRW
 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
@@ -173,7 +171,7 @@ def PUSH64r  : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
 // Long form for the disassembler.
 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
-                 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
+                 OpSize32, Requires<[In64BitMode]>;
 } // isCodeGenOnly = 1, ForceDisassemble = 1
 } // mayStore, SchedRW
 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
@@ -609,15 +607,14 @@ let SchedRW = [WriteBitTest] in {
 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
                "bt{w}\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
-               OpSize16, TB, NotMemoryFoldable;
+               OpSize16, TB;
 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
                "bt{l}\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
-               OpSize32, TB, NotMemoryFoldable;
+               OpSize32, TB;
 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
-               [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,
-               NotMemoryFoldable;
+               [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
 } // SchedRW
 
 // Unlike with the register+register form, the memory+register form of the
@@ -629,13 +626,13 @@ def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
   def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
                  "bt{w}\t{$src2, $src1|$src1, $src2}",
-                 []>, OpSize16, TB, NotMemoryFoldable;
+                 []>, OpSize16, TB;
   def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
                  "bt{l}\t{$src2, $src1|$src1, $src2}",
-                 []>, OpSize32, TB, NotMemoryFoldable;
+                 []>, OpSize32, TB;
   def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                  "bt{q}\t{$src2, $src1|$src1, $src2}",
-                  []>, TB, NotMemoryFoldable;
+                  []>, TB;
 }
 
 let SchedRW = [WriteBitTest] in {
@@ -676,25 +673,23 @@ let hasSideEffects = 0 in {
 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
 def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize16, TB, NotMemoryFoldable;
+                OpSize16, TB;
 def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize32, TB, NotMemoryFoldable;
+                OpSize32, TB;
 def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
-                 NotMemoryFoldable;
+                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
 } // SchedRW
 
 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
                 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize16, TB, NotMemoryFoldable;
+                OpSize16, TB;
 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
                 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize32, TB, NotMemoryFoldable;
+                OpSize32, TB;
 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
-                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
-                 NotMemoryFoldable;
+                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
 }
 
 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
@@ -719,25 +714,23 @@ def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
 def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize16, TB, NotMemoryFoldable;
+                OpSize16, TB;
 def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize32, TB, NotMemoryFoldable;
+                OpSize32, TB;
 def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
-                 NotMemoryFoldable;
+                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
 } // SchedRW
 
 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
                 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize16, TB, NotMemoryFoldable;
+                OpSize16, TB;
 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
                 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize32, TB, NotMemoryFoldable;
+                OpSize32, TB;
 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
-                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
-                 NotMemoryFoldable;
+                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
 }
 
 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
@@ -766,25 +759,23 @@ def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
 def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
-                OpSize16, TB, NotMemoryFoldable;
+                OpSize16, TB;
 def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
-              OpSize32, TB, NotMemoryFoldable;
+              OpSize32, TB;
 def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-               "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
-               NotMemoryFoldable;
+               "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
 } // SchedRW
 
 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
               "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
-              OpSize16, TB, NotMemoryFoldable;
+              OpSize16, TB;
 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
               "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
-              OpSize32, TB, NotMemoryFoldable;
+              OpSize32, TB;
 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
-                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
-                 NotMemoryFoldable;
+                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
 }
 
 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
@@ -846,25 +837,25 @@ multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag>
   }
 }
 
-defm XCHG    : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
+defm XCHG    : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">;
 
 // Swap between registers.
 let SchedRW = [WriteXCHG] in {
 let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
                 (ins GR8:$src1, GR8:$src2),
-                "xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
+                "xchg{b}\t{$src2, $src1|$src1, $src2}", []>;
 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
                  (ins GR16:$src1, GR16:$src2),
                  "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
-                 OpSize16, NotMemoryFoldable;
+                 OpSize16;
 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
                  (ins GR32:$src1, GR32:$src2),
                  "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
-                 OpSize32, NotMemoryFoldable;
+                 OpSize32;
 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
                   (ins GR64:$src1 ,GR64:$src2),
-                  "xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
+                  "xchg{q}\t{$src2, $src1|$src1, $src2}", []>;
 }
 
 // Swap between EAX and other registers.
@@ -919,40 +910,32 @@ def XADD64rm  : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
 let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
 let Defs = [AL, EFLAGS], Uses = [AL] in
 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
-                   "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
-                   NotMemoryFoldable;
+                   "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
 let Defs = [AX, EFLAGS], Uses = [AX] in
 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
-                    "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
-                    NotMemoryFoldable;
+                    "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16;
 let Defs = [EAX, EFLAGS], Uses = [EAX] in
 def CMPXCHG32rr  : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
-                     "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
-                     NotMemoryFoldable;
+                     "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32;
 let Defs = [RAX, EFLAGS], Uses = [RAX] in
 def CMPXCHG64rr  : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
-                      "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
-                      NotMemoryFoldable;
+                      "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
 } // SchedRW, hasSideEffects
 
 let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
     hasSideEffects = 0 in {
 let Defs = [AL, EFLAGS], Uses = [AL] in
 def CMPXCHG8rm   : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
-                     "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
-                     NotMemoryFoldable;
+                     "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
 let Defs = [AX, EFLAGS], Uses = [AX] in
 def CMPXCHG16rm  : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
-                     "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
-                     NotMemoryFoldable;
+                     "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16;
 let Defs = [EAX, EFLAGS], Uses = [EAX] in
 def CMPXCHG32rm  : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
-                     "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
-                     NotMemoryFoldable;
+                     "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32;
 let Defs = [RAX, EFLAGS], Uses = [RAX] in
 def CMPXCHG64rm  : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
-                      "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
-                      NotMemoryFoldable;
+                      "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
 
 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
@@ -1087,11 +1070,11 @@ def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
 // Adjust RPL Field of Segment Selector
 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
                  "arpl\t{$src, $dst|$dst, $src}", []>,
-                 Requires<[Not64BitMode]>, NotMemoryFoldable;
+                 Requires<[Not64BitMode]>;
 let mayStore = 1 in
 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
                  "arpl\t{$src, $dst|$dst, $src}", []>,
-                 Requires<[Not64BitMode]>, NotMemoryFoldable;
+                 Requires<[Not64BitMode]>;
 } // SchedRW
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index b50534c4e597..102cf5aa309c 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -835,8 +835,7 @@ let Predicates = [UseAVX] in {
                       "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
-                      VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG,
-                      NotMemoryFoldable;
+                      VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG;
 }
 let Constraints = "$src1 = $dst" in {
   def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
@@ -851,7 +850,7 @@ let Constraints = "$src1 = $dst" in {
                       "movhlps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
-                      Sched<[SchedWriteFShuffle.XMM]>, NotMemoryFoldable;
+                      Sched<[SchedWriteFShuffle.XMM]>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -3026,7 +3025,7 @@ multiclass sse1_fp_unop_s_intr<string OpcodeStr, Predicate AVXTarget> {
   defm V#NAME#SS  : avx_fp_unop_s_intr<v4f32, sse_load_f32,
                       !cast<Intrinsic>("int_x86_sse_"#OpcodeStr#_ss),
                       AVXTarget>,
-                      XS, VEX_4V, VEX_LIG, VEX_WIG, NotMemoryFoldable;
+                      XS, VEX_4V, VEX_LIG, VEX_WIG;
 }
 
 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,

diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index ca981f58908e..69ddc876bbe4 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -216,43 +216,43 @@ def SWAPGS : I<0x01, MRM_F8, (outs), (ins), "swapgs", []>, TB;
 let mayLoad = 1 in
 def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                 "lar{w}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize16, NotMemoryFoldable;
+                OpSize16;
 def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16orGR32orGR64:$src),
                 "lar{w}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize16, NotMemoryFoldable;
+                OpSize16;
 
 let mayLoad = 1 in
 def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
                 "lar{l}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize32, NotMemoryFoldable;
+                OpSize32;
 def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR16orGR32orGR64:$src),
                 "lar{l}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize32, NotMemoryFoldable;
+                OpSize32;
 let mayLoad = 1 in
 def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
-                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB, NotMemoryFoldable;
+                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
 def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR16orGR32orGR64:$src),
-                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB, NotMemoryFoldable;
+                 "lar{q}\t{$src, $dst|$dst, $src}", []>, TB;
 
 let mayLoad = 1 in
 def LSL16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                 "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize16, NotMemoryFoldable;
+                OpSize16;
 def LSL16rr : I<0x03, MRMSrcReg, (outs GR16:$dst), (ins GR16orGR32orGR64:$src),
                 "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize16, NotMemoryFoldable;
+                OpSize16;
 let mayLoad = 1 in
 def LSL32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
                 "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize32, NotMemoryFoldable;
+                OpSize32;
 def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR16orGR32orGR64:$src),
                 "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB,
-                OpSize32, NotMemoryFoldable;
+                OpSize32;
 let mayLoad = 1 in
 def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
-                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB, NotMemoryFoldable;
+                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
 def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR16orGR32orGR64:$src),
-                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB, NotMemoryFoldable;
+                 "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB;
 
 def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr", []>, TB;
 
@@ -265,9 +265,9 @@ def STR64r : RI<0x00, MRM1r, (outs GR64:$dst), (ins),
 let mayStore = 1 in
 def STRm   : I<0x00, MRM1m, (outs), (ins i16mem:$dst), "str{w}\t$dst", []>, TB;
 
-def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src), "ltr{w}\t$src", []>, TB, NotMemoryFoldable;
+def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src), "ltr{w}\t$src", []>, TB;
 let mayLoad = 1 in
-def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src), "ltr{w}\t$src", []>, TB, NotMemoryFoldable;
+def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src), "ltr{w}\t$src", []>, TB;
 
 def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins), "push{w}\t{%cs|cs}", []>,
                  OpSize16, Requires<[Not64BitMode]>;
@@ -364,11 +364,11 @@ def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaquemem:$src),
 def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaquemem:$src),
                  "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB;
 
-def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg), "verr\t$seg", []>, TB, NotMemoryFoldable;
-def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg), "verw\t$seg", []>, TB, NotMemoryFoldable;
+def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg), "verr\t$seg", []>, TB;
+def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg), "verw\t$seg", []>, TB;
 let mayLoad = 1 in {
-def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg), "verr\t$seg", []>, TB, NotMemoryFoldable;
-def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg), "verw\t$seg", []>, TB, NotMemoryFoldable;
+def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg), "verr\t$seg", []>, TB;
+def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg), "verw\t$seg", []>, TB;
 }
 } // SchedRW
 
@@ -414,10 +414,10 @@ def LIDT32m : I<0x01, MRM3m, (outs), (ins opaquemem:$src),
 def LIDT64m : I<0x01, MRM3m, (outs), (ins opaquemem:$src),
                 "lidt{q}\t$src", []>, TB, Requires<[In64BitMode]>;
 def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src),
-                "lldt{w}\t$src", []>, TB, NotMemoryFoldable;
+                "lldt{w}\t$src", []>, TB;
 let mayLoad = 1 in
 def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src),
-                "lldt{w}\t$src", []>, TB, NotMemoryFoldable;
+                "lldt{w}\t$src", []>, TB;
 } // SchedRW
 
 //===----------------------------------------------------------------------===//
@@ -451,10 +451,10 @@ def SMSW16m : I<0x01, MRM4m, (outs), (ins i16mem:$dst),
                 "smsw{w}\t$dst", []>, TB;
 
 def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src),
-                "lmsw{w}\t$src", []>, TB, NotMemoryFoldable;
+                "lmsw{w}\t$src", []>, TB;
 let mayLoad = 1 in
 def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src),
-                "lmsw{w}\t$src", []>, TB, NotMemoryFoldable;
+                "lmsw{w}\t$src", []>, TB;
 
 let Defs = [EAX, EBX, ECX, EDX], Uses = [EAX, ECX] in
   def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", []>, TB;

diff --git a/llvm/lib/Target/X86/X86InstrVMX.td b/llvm/lib/Target/X86/X86InstrVMX.td
index d204a33358ea..cfeddbccccac 100644
--- a/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/llvm/lib/Target/X86/X86InstrVMX.td
@@ -49,35 +49,27 @@ def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
 def VMPTRSTm : I<0xC7, MRM7m, (outs), (ins i64mem:$vmcs),
   "vmptrst\t$vmcs", []>, PS;
 def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
-  "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>,
-  NotMemoryFoldable;
+  "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
 def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
-  "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>,
-  NotMemoryFoldable;
+  "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
 
 let mayStore = 1 in {
 def VMREAD64mr : I<0x78, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
-  "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>,
-  NotMemoryFoldable;
+  "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
 def VMREAD32mr : I<0x78, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
-  "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>,
-  NotMemoryFoldable;
+  "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
 } // mayStore
 
 def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
-  "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>,
-  NotMemoryFoldable;
+  "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
 def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
-  "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>,
-  NotMemoryFoldable;
+  "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
 
 let mayLoad = 1 in {
 def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
-  "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>,
-  NotMemoryFoldable;
+  "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
 def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
-  "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>,
-  NotMemoryFoldable;
+  "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
 } // mayLoad
 
 // 0F 01 C4

diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index b576e36a5281..e02e7f48594f 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -43,7 +43,11 @@ const char *ExplicitUnalign[] = {"MOVDQU", "MOVUPS", "MOVUPD",
 const ManualMapEntry ManualMapSet[] = {
 #define ENTRY(REG, MEM, FLAGS) {#REG, #MEM, FLAGS},
 #include "X86ManualFoldTables.def"
-#undef ENTRY
+};
+
+const std::set<StringRef> NoFoldSet= {
+#define NOFOLD(INSN) #INSN,
+#include "X86ManualFoldTables.def"
 };
 
 static bool isExplicitAlign(const CodeGenInstruction *Inst) {
@@ -471,7 +475,7 @@ void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
   unsigned RegInSize = RegRec->getValueAsDag("InOperandList")->getNumArgs();
 
   // Instructions which Read-Modify-Write should be added to Table2Addr.
-  if (MemOutSize != RegOutSize && MemInSize == RegInSize) {
+  if (!MemOutSize && RegOutSize == 1 && MemInSize == RegInSize) {
     addEntryWithFlags(Table2Addr, RegInstr, MemInstr, S, 0, IsManual);
     return;
   }
@@ -538,7 +542,9 @@ void X86FoldTablesEmitter::run(raw_ostream &o) {
     if (!Rec->isSubClassOf("X86Inst") || Rec->getValueAsBit("isAsmParserOnly"))
       continue;
 
-    // - Do not proceed if the instruction is marked as notMemoryFoldable.
+    if (NoFoldSet.find(Rec->getName()) != NoFoldSet.end())
+      continue;
+
     // - Instructions including RST register class operands are not relevant
     //   for memory folding (for further details check the explanation in
     //   lib/Target/X86/X86InstrFPStack.td file).
@@ -546,8 +552,7 @@ void X86FoldTablesEmitter::run(raw_ostream &o) {
     //   class ptr_rc_tailcall, which can be of a size 32 or 64, to ensure
     //   safe mapping of these instruction we manually map them and exclude
     //   them from the automation.
-    if (Rec->getValueAsBit("isMemoryFoldable") == false ||
-        hasRSTRegClass(Inst) || hasPtrTailcallRegClass(Inst))
+    if (hasRSTRegClass(Inst) || hasPtrTailcallRegClass(Inst))
       continue;
 
     // Add all the memory form instructions to MemInsts, and all the register
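
One behavioral note on the X86FoldTablesEmitter.cpp hunk above: the
Read-Modify-Write check in updateTables is rewritten from an inequality on
out-operand counts to the exact shape it is meant to match (memory form
with no outputs, register form with exactly one). On paper the new
predicate is strictly narrower than the old one; since the patch is tagged
NFCI, the working assumption is that no X86 instruction pair falls in the
gap. A standalone sketch comparing the two predicates, with hypothetical
operand counts:

  #include <cassert>

  // Old test: any out-count mismatch, with matching in-counts.
  bool oldIsRMW(unsigned MemOut, unsigned RegOut,
                unsigned MemIn, unsigned RegIn) {
    return MemOut != RegOut && MemIn == RegIn;
  }

  // New test: memory form has no outs, register form has exactly one.
  bool newIsRMW(unsigned MemOut, unsigned RegOut,
                unsigned MemIn, unsigned RegIn) {
    return MemOut == 0 && RegOut == 1 && MemIn == RegIn;
  }

  int main() {
    assert(oldIsRMW(0, 1, 2, 2) && newIsRMW(0, 1, 2, 2));  // both accept the RMW shape
    assert(oldIsRMW(0, 2, 2, 2) && !newIsRMW(0, 2, 2, 2)); // only the old test fires
    return 0;
  }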

diff --git a/llvm/utils/TableGen/X86ManualFoldTables.def b/llvm/utils/TableGen/X86ManualFoldTables.def
index ba027091c520..9c9e8bde8102 100644
--- a/llvm/utils/TableGen/X86ManualFoldTables.def
+++ b/llvm/utils/TableGen/X86ManualFoldTables.def
@@ -10,10 +10,210 @@
 // special handling.
 //===----------------------------------------------------------------------===//
 
+#ifndef NOFOLD
+#define NOFOLD(INSN)
+#endif
+NOFOLD(BTC16rr)
+NOFOLD(BTC32rr)
+NOFOLD(BTC64rr)
+NOFOLD(BTR16rr)
+NOFOLD(BTR32rr)
+NOFOLD(BTR64rr)
+NOFOLD(BTS16rr)
+NOFOLD(BTS32rr)
+NOFOLD(BTS64rr)
+NOFOLD(VCOMPRESSPDZ128rrk)
+NOFOLD(VCOMPRESSPDZ256rrk)
+NOFOLD(VCOMPRESSPDZrrk)
+NOFOLD(VCOMPRESSPSZ128rrk)
+NOFOLD(VCOMPRESSPSZ256rrk)
+NOFOLD(VCOMPRESSPSZrrk)
+NOFOLD(VCVTPS2PHZ128rrk)
+NOFOLD(VCVTPS2PHZ256rrk)
+NOFOLD(VCVTPS2PHZrrk)
+NOFOLD(VEXTRACTF32x4Z256rrk)
+NOFOLD(VEXTRACTF32x4Zrrk)
+NOFOLD(VEXTRACTF32x8Zrrk)
+NOFOLD(VEXTRACTF64x2Z256rrk)
+NOFOLD(VEXTRACTF64x2Zrrk)
+NOFOLD(VEXTRACTF64x4Zrrk)
+NOFOLD(VEXTRACTI32x4Z256rrk)
+NOFOLD(VEXTRACTI32x4Zrrk)
+NOFOLD(VEXTRACTI32x8Zrrk)
+NOFOLD(VEXTRACTI64x2Z256rrk)
+NOFOLD(VEXTRACTI64x2Zrrk)
+NOFOLD(VEXTRACTI64x4Zrrk)
+NOFOLD(VMOVAPDZ128mrk)
+NOFOLD(VMOVAPDZ256mrk)
+NOFOLD(VMOVAPDZmrk)
+NOFOLD(VMOVAPSZ128mrk)
+NOFOLD(VMOVAPSZ256mrk)
+NOFOLD(VMOVAPSZmrk)
+NOFOLD(VMOVDQA32Z128mrk)
+NOFOLD(VMOVDQA32Z256mrk)
+NOFOLD(VMOVDQA32Zmrk)
+NOFOLD(VMOVDQA64Z128mrk)
+NOFOLD(VMOVDQA64Z256mrk)
+NOFOLD(VMOVDQA64Zmrk)
+NOFOLD(VMOVDQU16Z128mrk)
+NOFOLD(VMOVDQU16Z256mrk)
+NOFOLD(VMOVDQU16Zmrk)
+NOFOLD(VMOVDQU32Z128mrk)
+NOFOLD(VMOVDQU32Z256mrk)
+NOFOLD(VMOVDQU32Zmrk)
+NOFOLD(VMOVDQU64Z128mrk)
+NOFOLD(VMOVDQU64Z256mrk)
+NOFOLD(VMOVDQU64Zmrk)
+NOFOLD(VMOVDQU8Z128mrk)
+NOFOLD(VMOVDQU8Z256mrk)
+NOFOLD(VMOVDQU8Zmrk)
+NOFOLD(VMOVUPDZ128mrk)
+NOFOLD(VMOVUPDZ256mrk)
+NOFOLD(VMOVUPDZmrk)
+NOFOLD(VMOVUPSZ128mrk)
+NOFOLD(VMOVUPSZ256mrk)
+NOFOLD(VMOVUPSZmrk)
+NOFOLD(VPCOMPRESSBZ128rrk)
+NOFOLD(VPCOMPRESSBZ256rrk)
+NOFOLD(VPCOMPRESSBZrrk)
+NOFOLD(VPCOMPRESSDZ128rrk)
+NOFOLD(VPCOMPRESSDZ256rrk)
+NOFOLD(VPCOMPRESSDZrrk)
+NOFOLD(VPCOMPRESSQZ128rrk)
+NOFOLD(VPCOMPRESSQZ256rrk)
+NOFOLD(VPCOMPRESSQZrrk)
+NOFOLD(VPCOMPRESSWZ128rrk)
+NOFOLD(VPCOMPRESSWZ256rrk)
+NOFOLD(VPCOMPRESSWZrrk)
+NOFOLD(VPMOVDBZ128rrk)
+NOFOLD(VPMOVDBZ256rrk)
+NOFOLD(VPMOVDBZrrk)
+NOFOLD(VPMOVDWZ128rrk)
+NOFOLD(VPMOVDWZ256rrk)
+NOFOLD(VPMOVDWZrrk)
+NOFOLD(VPMOVQBZ128rrk)
+NOFOLD(VPMOVQBZ256rrk)
+NOFOLD(VPMOVQBZrrk)
+NOFOLD(VPMOVQDZ128rrk)
+NOFOLD(VPMOVQDZ256rrk)
+NOFOLD(VPMOVQDZrrk)
+NOFOLD(VPMOVQWZ128rrk)
+NOFOLD(VPMOVQWZ256rrk)
+NOFOLD(VPMOVQWZrrk)
+NOFOLD(VPMOVSDBZ128rrk)
+NOFOLD(VPMOVSDBZ256rrk)
+NOFOLD(VPMOVSDBZrrk)
+NOFOLD(VPMOVSDWZ128rrk)
+NOFOLD(VPMOVSDWZ256rrk)
+NOFOLD(VPMOVSDWZrrk)
+NOFOLD(VPMOVSQBZ128rrk)
+NOFOLD(VPMOVSQBZ256rrk)
+NOFOLD(VPMOVSQBZrrk)
+NOFOLD(VPMOVSQDZ128rrk)
+NOFOLD(VPMOVSQDZ256rrk)
+NOFOLD(VPMOVSQDZrrk)
+NOFOLD(VPMOVSQWZ128rrk)
+NOFOLD(VPMOVSQWZ256rrk)
+NOFOLD(VPMOVSQWZrrk)
+NOFOLD(VPMOVSWBZ128rrk)
+NOFOLD(VPMOVSWBZ256rrk)
+NOFOLD(VPMOVSWBZrrk)
+NOFOLD(VPMOVUSDBZ128rrk)
+NOFOLD(VPMOVUSDBZ256rrk)
+NOFOLD(VPMOVUSDBZrrk)
+NOFOLD(VPMOVUSDWZ128rrk)
+NOFOLD(VPMOVUSDWZ256rrk)
+NOFOLD(VPMOVUSDWZrrk)
+NOFOLD(VPMOVUSQBZ128rrk)
+NOFOLD(VPMOVUSQBZ256rrk)
+NOFOLD(VPMOVUSQBZrrk)
+NOFOLD(VPMOVUSQDZ128rrk)
+NOFOLD(VPMOVUSQDZ256rrk)
+NOFOLD(VPMOVUSQDZrrk)
+NOFOLD(VPMOVUSQWZ128rrk)
+NOFOLD(VPMOVUSQWZ256rrk)
+NOFOLD(VPMOVUSQWZrrk)
+NOFOLD(VPMOVUSWBZ128rrk)
+NOFOLD(VPMOVUSWBZ256rrk)
+NOFOLD(VPMOVUSWBZrrk)
+NOFOLD(VPMOVWBZ128rrk)
+NOFOLD(VPMOVWBZ256rrk)
+NOFOLD(VPMOVWBZrrk)
+NOFOLD(ARPL16rr)
+NOFOLD(BT16rr)
+NOFOLD(BT32rr)
+NOFOLD(BT64rr)
+NOFOLD(CMPXCHG16rr)
+NOFOLD(CMPXCHG32rr)
+NOFOLD(CMPXCHG64rr)
+NOFOLD(CMPXCHG8rr)
+NOFOLD(LLDT16r)
+NOFOLD(LMSW16r)
+NOFOLD(LTRr)
+NOFOLD(NOOPLr)
+NOFOLD(NOOPQr)
+NOFOLD(NOOPWr)
+NOFOLD(POP16rmr)
+NOFOLD(POP32rmr)
+NOFOLD(POP64rmr)
+NOFOLD(PUSH16rmr)
+NOFOLD(PUSH32rmr)
+NOFOLD(PUSH64rmr)
+NOFOLD(VCOMPRESSPDZ128rr)
+NOFOLD(VCOMPRESSPDZ256rr)
+NOFOLD(VCOMPRESSPDZrr)
+NOFOLD(VCOMPRESSPSZ128rr)
+NOFOLD(VCOMPRESSPSZ256rr)
+NOFOLD(VCOMPRESSPSZrr)
+NOFOLD(VERRr)
+NOFOLD(VERWr)
+NOFOLD(VMREAD32rr)
+NOFOLD(VMREAD64rr)
+NOFOLD(VPCOMPRESSBZ128rr)
+NOFOLD(VPCOMPRESSBZ256rr)
+NOFOLD(VPCOMPRESSBZrr)
+NOFOLD(VPCOMPRESSDZ128rr)
+NOFOLD(VPCOMPRESSDZ256rr)
+NOFOLD(VPCOMPRESSDZrr)
+NOFOLD(VPCOMPRESSQZ128rr)
+NOFOLD(VPCOMPRESSQZ256rr)
+NOFOLD(VPCOMPRESSQZrr)
+NOFOLD(VPCOMPRESSWZ128rr)
+NOFOLD(VPCOMPRESSWZ256rr)
+NOFOLD(VPCOMPRESSWZrr)
+NOFOLD(LAR16rr)
+NOFOLD(LAR32rr)
+NOFOLD(LAR64rr)
+NOFOLD(LSL16rr)
+NOFOLD(LSL32rr)
+NOFOLD(LSL64rr)
+NOFOLD(MOVSX16rr16)
+NOFOLD(MOVZX16rr16)
+NOFOLD(VMWRITE32rr)
+NOFOLD(VMWRITE64rr)
+NOFOLD(VBLENDMPDZ128rrkz)
+NOFOLD(VBLENDMPDZ256rrkz)
+NOFOLD(VBLENDMPDZrrkz)
+NOFOLD(VBLENDMPSZ128rrkz)
+NOFOLD(VBLENDMPSZ256rrkz)
+NOFOLD(VBLENDMPSZrrkz)
+NOFOLD(VPBLENDMBZ128rrkz)
+NOFOLD(VPBLENDMBZ256rrkz)
+NOFOLD(VPBLENDMBZrrkz)
+NOFOLD(VPBLENDMDZ128rrkz)
+NOFOLD(VPBLENDMDZ256rrkz)
+NOFOLD(VPBLENDMDZrrkz)
+NOFOLD(VPBLENDMQZ128rrkz)
+NOFOLD(VPBLENDMQZ256rrkz)
+NOFOLD(VPBLENDMQZrrkz)
+NOFOLD(VPBLENDMWZ128rrkz)
+NOFOLD(VPBLENDMWZ256rrkz)
+NOFOLD(VPBLENDMWZrrkz)
+#undef NOFOLD
+
 #ifndef ENTRY
 #define ENTRY(REG, MEM, FLAGS)
 #endif
-
 // Part1: These following records are for manually mapping instructions that
 // do not match by their encoding.
 ENTRY(ADD16ri_DB, ADD16mi, TB_NO_REVERSE)
@@ -93,3 +293,4 @@ ENTRY(UD1Wr, UD1Wm, TB_NO_REVERSE | TB_NO_FORWARD)
 ENTRY(MMX_MOVQ64rr, MMX_MOVQ64mr, TB_NO_FORWARD | TB_NO_REVERSE)
 // Exclude this b/c it would conflicts with  {MMX_MOVD64from64rr, MMX_MOVQ64rm} in unfolding table
 ENTRY(MMX_MOVQ64rr, MMX_MOVQ64rm, TB_NO_FORWARD | TB_NO_REVERSE)
+#undef ENTRY

