[llvm] [X86][NFC] Reorganize the X86Instr*.td (PR #74454)

via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 5 03:52:53 PST 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-x86

Author: Shengchen Kan (KanRobert)

<details>
<summary>Changes</summary>

1. Move all pattern fragments for SIMD instructions to X86InstrFragmentsSIMD.td
2. Create X86InstrFragments.td and move non-SIMD pattern fragments into it
3. Create X86InstrOperands.td and move operand definitions into it
4. Create X86InstrPredicates.td and move predicate definitions into it
5. Create X86InstrUtils.td and move utilities for simplifying the instruction
   definitions into it
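
X86InstrInfo.td goes from -1391/+9 lines in this patch, so presumably the few
surviving additions just pull the new files back in. A minimal sketch of what
that wiring could look like (the include order and comments here are an
assumption; the actual added lines are not visible in the truncated diff):

```tablegen
// Hypothetical include wiring in X86InstrInfo.td after the split.
// The real patch may order or group these differently.
include "X86InstrFormats.td"        // instruction format/encoding classes
include "X86InstrPredicates.td"     // Has*/No* feature predicates
include "X86InstrOperands.td"       // memory and immediate operand defs
include "X86InstrUtils.td"          // helper classes for concise instruction defs
include "X86InstrFragments.td"      // non-SIMD pattern fragments
include "X86InstrFragmentsSIMD.td"  // SIMD pattern fragments
```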


---

Patch is 249.83 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/74454.diff


14 Files Affected:

- (modified) llvm/lib/Target/X86/X86Instr3DNow.td (-10) 
- (modified) llvm/lib/Target/X86/X86InstrAVX512.td (-283) 
- (modified) llvm/lib/Target/X86/X86InstrArithmetic.td (-24) 
- (modified) llvm/lib/Target/X86/X86InstrCompiler.td (-87) 
- (modified) llvm/lib/Target/X86/X86InstrFPStack.td (-121) 
- (modified) llvm/lib/Target/X86/X86InstrFormats.td (-706) 
- (added) llvm/lib/Target/X86/X86InstrFragments.td (+841) 
- (modified) llvm/lib/Target/X86/X86InstrFragmentsSIMD.td (+113-4) 
- (modified) llvm/lib/Target/X86/X86InstrInfo.td (+9-1391) 
- (modified) llvm/lib/Target/X86/X86InstrMisc.td (-20) 
- (added) llvm/lib/Target/X86/X86InstrOperands.td (+497) 
- (added) llvm/lib/Target/X86/X86InstrPredicates.td (+207) 
- (modified) llvm/lib/Target/X86/X86InstrSSE.td (-5) 
- (added) llvm/lib/Target/X86/X86InstrUtils.td (+1014) 


``````````diff
diff --git a/llvm/lib/Target/X86/X86Instr3DNow.td b/llvm/lib/Target/X86/X86Instr3DNow.td
index d5651b6776957..3be03ab0f4332 100644
--- a/llvm/lib/Target/X86/X86Instr3DNow.td
+++ b/llvm/lib/Target/X86/X86Instr3DNow.td
@@ -79,16 +79,6 @@ let SchedRW = [WriteEMMS],
 def FEMMS : I3DNow<0x0E, RawFrm, (outs), (ins), "femms",
                    [(int_x86_mmx_femms)]>, TB;
 
-// If PREFETCHWT1 is supported, we want to use it for everything but T0.
-def PrefetchWLevel : PatFrag<(ops), (i32 timm), [{
-  return N->getSExtValue() == 3 || !Subtarget->hasPREFETCHWT1();
-}]>;
-
-// Use PREFETCHWT1 for NTA, T2, T1.
-def PrefetchWT1Level : TImmLeaf<i32, [{
-  return Imm < 3;
-}]>;
-
 let SchedRW = [WriteLoad] in {
 let Predicates = [Has3DNow, NoSSEPrefetch] in
 def PREFETCH : I3DNow<0x0D, MRM0m, (outs), (ins i8mem:$addr),
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 77b359e84fbd2..5eb893a82fcc7 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -12,194 +12,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-// Group template arguments that can be derived from the vector type (EltNum x
-// EltVT).  These are things like the register class for the writemask, etc.
-// The idea is to pass one of these as the template argument rather than the
-// individual arguments.
-// The template is also used for scalar types, in this case numelts is 1.
-class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
-                      string suffix = ""> {
-  RegisterClass RC = rc;
-  ValueType EltVT = eltvt;
-  int NumElts = numelts;
-
-  // Corresponding mask register class.
-  RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);
-
-  // Corresponding mask register pair class.
-  RegisterOperand KRPC = !if (!gt(NumElts, 16), ?,
-                              !cast<RegisterOperand>("VK" # NumElts # "Pair"));
-
-  // Corresponding write-mask register class.
-  RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");
-
-  // The mask VT.
-  ValueType KVT = !cast<ValueType>("v" # NumElts # "i1");
-
-  // Suffix used in the instruction mnemonic.
-  string Suffix = suffix;
-
-  // VTName is a string name for vector VT. For vector types it will be
-  // v # NumElts # EltVT, so for vector of 8 elements of i32 it will be v8i32
-  // It is a little bit complex for scalar types, where NumElts = 1.
-  // In this case we build v4f32 or v2f64
-  string VTName = "v" # !if (!eq (NumElts, 1),
-                        !if (!eq (EltVT.Size, 16), 8,
-                        !if (!eq (EltVT.Size, 32), 4,
-                        !if (!eq (EltVT.Size, 64), 2, NumElts))), NumElts) # EltVT;
-
-  // The vector VT.
-  ValueType VT = !cast<ValueType>(VTName);
-
-  string EltTypeName = !cast<string>(EltVT);
-  // Size of the element type in bits, e.g. 32 for v16i32.
-  string EltSizeName = !subst("i", "", !subst("f", "", !subst("b", "", EltTypeName)));
-  int EltSize = EltVT.Size;
-
-  // "i" for integer types and "f" for floating-point types
-  string TypeVariantName = !subst("b", "", !subst(EltSizeName, "", EltTypeName));
-
-  // Size of RC in bits, e.g. 512 for VR512.
-  int Size = VT.Size;
-
-  // The corresponding memory operand, e.g. i512mem for VR512.
-  X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
-  X86MemOperand ScalarMemOp = !cast<X86MemOperand>(!subst("b", "", EltTypeName) # "mem");
-  // FP scalar memory operand for intrinsics - ssmem/sdmem.
-  Operand IntScalarMemOp = !if (!eq (EltTypeName, "f16"), !cast<Operand>("shmem"),
-                           !if (!eq (EltTypeName, "bf16"), !cast<Operand>("shmem"),
-                           !if (!eq (EltTypeName, "f32"), !cast<Operand>("ssmem"),
-                           !if (!eq (EltTypeName, "f64"), !cast<Operand>("sdmem"), ?))));
-
-  // Load patterns
-  PatFrag LdFrag = !cast<PatFrag>("load" # VTName);
-
-  PatFrag AlignedLdFrag = !cast<PatFrag>("alignedload" # VTName);
-
-  PatFrag ScalarLdFrag = !cast<PatFrag>("load" # !subst("b", "", EltTypeName));
-  PatFrag BroadcastLdFrag = !cast<PatFrag>("X86VBroadcastld" # EltSizeName);
-
-  PatFrags ScalarIntMemFrags = !if (!eq (EltTypeName, "f16"), !cast<PatFrags>("sse_load_f16"),
-                               !if (!eq (EltTypeName, "bf16"), !cast<PatFrags>("sse_load_f16"),
-                               !if (!eq (EltTypeName, "f32"), !cast<PatFrags>("sse_load_f32"),
-                               !if (!eq (EltTypeName, "f64"), !cast<PatFrags>("sse_load_f64"), ?))));
-
-  // The string to specify embedded broadcast in assembly.
-  string BroadcastStr = "{1to" # NumElts # "}";
-
-  // 8-bit compressed displacement tuple/subvector format.  This is only
-  // defined for NumElts <= 8.
-  CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
-                               !cast<CD8VForm>("CD8VT" # NumElts), ?);
-
-  SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
-                          !if (!eq (Size, 256), sub_ymm, ?));
-
-  Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
-                     !if (!eq (EltTypeName, "f64"), SSEPackedDouble,
-                     !if (!eq (EltTypeName, "f16"), SSEPackedSingle, // FIXME?
-                     !if (!eq (EltTypeName, "bf16"), SSEPackedSingle, // FIXME?
-                     SSEPackedInt))));
-
-  RegisterClass FRC = !if (!eq (EltTypeName, "f32"), FR32X,
-                      !if (!eq (EltTypeName, "f16"), FR16X,
-                      !if (!eq (EltTypeName, "bf16"), FR16X,
-                      FR64X)));
-
-  dag ImmAllZerosV = (VT immAllZerosV);
-
-  string ZSuffix = !if (!eq (Size, 128), "Z128",
-                   !if (!eq (Size, 256), "Z256", "Z"));
-}
-
-def v64i8_info  : X86VectorVTInfo<64,  i8, VR512, "b">;
-def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
-def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
-def v8i64_info  : X86VectorVTInfo<8,  i64, VR512, "q">;
-def v32f16_info : X86VectorVTInfo<32, f16, VR512, "ph">;
-def v32bf16_info: X86VectorVTInfo<32, bf16, VR512, "pbf">;
-def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
-def v8f64_info  : X86VectorVTInfo<8,  f64, VR512, "pd">;
-
-// "x" in v32i8x_info means RC = VR256X
-def v32i8x_info  : X86VectorVTInfo<32,  i8, VR256X, "b">;
-def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
-def v8i32x_info  : X86VectorVTInfo<8,  i32, VR256X, "d">;
-def v4i64x_info  : X86VectorVTInfo<4,  i64, VR256X, "q">;
-def v16f16x_info : X86VectorVTInfo<16, f16, VR256X, "ph">;
-def v16bf16x_info: X86VectorVTInfo<16, bf16, VR256X, "pbf">;
-def v8f32x_info  : X86VectorVTInfo<8,  f32, VR256X, "ps">;
-def v4f64x_info  : X86VectorVTInfo<4,  f64, VR256X, "pd">;
-
-def v16i8x_info  : X86VectorVTInfo<16,  i8, VR128X, "b">;
-def v8i16x_info  : X86VectorVTInfo<8,  i16, VR128X, "w">;
-def v4i32x_info  : X86VectorVTInfo<4,  i32, VR128X, "d">;
-def v2i64x_info  : X86VectorVTInfo<2,  i64, VR128X, "q">;
-def v8f16x_info  : X86VectorVTInfo<8,  f16, VR128X, "ph">;
-def v8bf16x_info : X86VectorVTInfo<8,  bf16, VR128X, "pbf">;
-def v4f32x_info  : X86VectorVTInfo<4,  f32, VR128X, "ps">;
-def v2f64x_info  : X86VectorVTInfo<2,  f64, VR128X, "pd">;
-
-// We map scalar types to the smallest (128-bit) vector type
-// with the appropriate element type. This allows to use the same masking logic.
-def i32x_info    : X86VectorVTInfo<1,  i32, GR32, "si">;
-def i64x_info    : X86VectorVTInfo<1,  i64, GR64, "sq">;
-def f16x_info    : X86VectorVTInfo<1,  f16, VR128X, "sh">;
-def bf16x_info   : X86VectorVTInfo<1,  bf16, VR128X, "sbf">;
-def f32x_info    : X86VectorVTInfo<1,  f32, VR128X, "ss">;
-def f64x_info    : X86VectorVTInfo<1,  f64, VR128X, "sd">;
-
-class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
-                           X86VectorVTInfo i128> {
-  X86VectorVTInfo info512 = i512;
-  X86VectorVTInfo info256 = i256;
-  X86VectorVTInfo info128 = i128;
-}
-
-def avx512vl_i8_info  : AVX512VLVectorVTInfo<v64i8_info, v32i8x_info,
-                                             v16i8x_info>;
-def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
-                                             v8i16x_info>;
-def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
-                                             v4i32x_info>;
-def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info, v4i64x_info,
-                                             v2i64x_info>;
-def avx512vl_f16_info : AVX512VLVectorVTInfo<v32f16_info, v16f16x_info,
-                                             v8f16x_info>;
-def avx512vl_bf16_info : AVX512VLVectorVTInfo<v32bf16_info, v16bf16x_info,
-                                             v8bf16x_info>;
-def avx512vl_f32_info : AVX512VLVectorVTInfo<v16f32_info, v8f32x_info,
-                                             v4f32x_info>;
-def avx512vl_f64_info : AVX512VLVectorVTInfo<v8f64_info, v4f64x_info,
-                                             v2f64x_info>;
-
-class X86KVectorVTInfo<RegisterClass _krc, RegisterClass _krcwm,
-                       ValueType _vt> {
-  RegisterClass KRC = _krc;
-  RegisterClass KRCWM = _krcwm;
-  ValueType KVT = _vt;
-}
-
-def v1i1_info : X86KVectorVTInfo<VK1, VK1WM, v1i1>;
-def v2i1_info : X86KVectorVTInfo<VK2, VK2WM, v2i1>;
-def v4i1_info : X86KVectorVTInfo<VK4, VK4WM, v4i1>;
-def v8i1_info : X86KVectorVTInfo<VK8, VK8WM, v8i1>;
-def v16i1_info : X86KVectorVTInfo<VK16, VK16WM, v16i1>;
-def v32i1_info : X86KVectorVTInfo<VK32, VK32WM, v32i1>;
-def v64i1_info : X86KVectorVTInfo<VK64, VK64WM, v64i1>;
-
-// Used for matching masked operations. Ensures the operation part only has a
-// single use.
-def vselect_mask : PatFrag<(ops node:$mask, node:$src1, node:$src2),
-                           (vselect node:$mask, node:$src1, node:$src2), [{
-  return isProfitableToFormMaskedOp(N);
-}]>;
-
-def X86selects_mask : PatFrag<(ops node:$mask, node:$src1, node:$src2),
-                              (X86selects node:$mask, node:$src1, node:$src2), [{
-  return isProfitableToFormMaskedOp(N);
-}]>;
-
 // This multiclass generates the masking variants from the non-masking
 // variant.  It only provides the assembly pieces for the masking variants.
 // It assumes custom ISel patterns for masking which can be provided as
@@ -2157,15 +1969,6 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE,
   }
 }
 
-def X86cmpms_su : PatFrag<(ops node:$src1, node:$src2, node:$cc),
-                          (X86cmpms node:$src1, node:$src2, node:$cc), [{
-  return N->hasOneUse();
-}]>;
-def X86cmpmsSAE_su : PatFrag<(ops node:$src1, node:$src2, node:$cc),
-                          (X86cmpmsSAE node:$src1, node:$src2, node:$cc), [{
-  return N->hasOneUse();
-}]>;
-
 let Predicates = [HasAVX512] in {
   let ExeDomain = SSEPackedSingle in
   defm VCMPSSZ : avx512_cmp_scalar<f32x_info, X86cmpms, X86cmpmsSAE,
@@ -2261,12 +2064,6 @@ multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
   }
 }
 
-// This fragment treats X86cmpm as commutable to help match loads in both
-// operands for PCMPEQ.
-def X86setcc_commute : SDNode<"ISD::SETCC", SDTSetCC, [SDNPCommutative]>;
-def X86pcmpgtm : PatFrag<(ops node:$src1, node:$src2),
-                         (setcc node:$src1, node:$src2, SETGT)>;
-
 // AddedComplexity is needed because the explicit SETEQ/SETGT CondCode doesn't
 // increase the pattern complexity the way an immediate would.
 let AddedComplexity = 2 in {
@@ -2304,20 +2101,6 @@ defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq",
                 T8PD, REX_W, EVEX_CD8<64, CD8VF>;
 }
 
-def X86pcmpm_imm : SDNodeXForm<setcc, [{
-  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  uint8_t SSECC = X86::getVPCMPImmForCond(CC);
-  return getI8Imm(SSECC, SDLoc(N));
-}]>;
-
-// Swapped operand version of the above.
-def X86pcmpm_imm_commute : SDNodeXForm<setcc, [{
-  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  uint8_t SSECC = X86::getVPCMPImmForCond(CC);
-  SSECC = X86::getSwappedVPCMPImm(SSECC);
-  return getI8Imm(SSECC, SDLoc(N));
-}]>;
-
 multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag,
                           PatFrag Frag_su,
                           X86FoldableSchedWrite sched,
@@ -2451,30 +2234,6 @@ multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, PatFrag Frag,
   }
 }
 
-def X86pcmpm : PatFrag<(ops node:$src1, node:$src2, node:$cc),
-                       (setcc node:$src1, node:$src2, node:$cc), [{
-  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  return !ISD::isUnsignedIntSetCC(CC);
-}], X86pcmpm_imm>;
-
-def X86pcmpm_su : PatFrag<(ops node:$src1, node:$src2, node:$cc),
-                          (setcc node:$src1, node:$src2, node:$cc), [{
-  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  return N->hasOneUse() && !ISD::isUnsignedIntSetCC(CC);
-}], X86pcmpm_imm>;
-
-def X86pcmpum : PatFrag<(ops node:$src1, node:$src2, node:$cc),
-                        (setcc node:$src1, node:$src2, node:$cc), [{
-  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  return ISD::isUnsignedIntSetCC(CC);
-}], X86pcmpm_imm>;
-
-def X86pcmpum_su : PatFrag<(ops node:$src1, node:$src2, node:$cc),
-                           (setcc node:$src1, node:$src2, node:$cc), [{
-  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  return N->hasOneUse() && ISD::isUnsignedIntSetCC(CC);
-}], X86pcmpm_imm>;
-
 // FIXME: Is there a better scheduler class for VPCMP/VPCMPU?
 defm VPCMPB : avx512_icmp_cc_vl<0x3F, "b", X86pcmpm, X86pcmpm_su,
                                 SchedWriteVecALU, avx512vl_i8_info, HasBWI>,
@@ -2504,16 +2263,6 @@ defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86pcmpum, X86pcmpum_su,
                                      SchedWriteVecALU, avx512vl_i64_info,
                                      HasAVX512>, REX_W, EVEX_CD8<64, CD8VF>;
 
-def X86cmpm_su : PatFrag<(ops node:$src1, node:$src2, node:$cc),
-                         (X86cmpm node:$src1, node:$src2, node:$cc), [{
-  return N->hasOneUse();
-}]>;
-
-def X86cmpm_imm_commute : SDNodeXForm<timm, [{
-  uint8_t Imm = X86::getSwappedVCMPImm(N->getZExtValue() & 0x1f);
-  return getI8Imm(Imm, SDLoc(N));
-}]>;
-
 multiclass avx512_vcmp_common<X86FoldableSchedWrite sched, X86VectorVTInfo _,
                               string Name> {
 let Uses = [MXCSR], mayRaiseFPException = 1 in {
@@ -2679,16 +2428,6 @@ let Predicates = [HasFP16] in {
 // ----------------------------------------------------------------
 // FPClass
 
-def X86Vfpclasss_su : PatFrag<(ops node:$src1, node:$src2),
-                              (X86Vfpclasss node:$src1, node:$src2), [{
-  return N->hasOneUse();
-}]>;
-
-def X86Vfpclass_su : PatFrag<(ops node:$src1, node:$src2),
-                             (X86Vfpclass node:$src1, node:$src2), [{
-  return N->hasOneUse();
-}]>;
-
 //handle fpclass instruction  mask =  op(reg_scalar,imm)
 //                                    op(mem_scalar,imm)
 multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr,
@@ -3082,10 +2821,6 @@ multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
                              sched, HasBWI, IsCommutable>, VEX_4V, VEX_L, REX_W, PS;
 }
 
-// These nodes use 'vnot' instead of 'not' to support vectors.
-def vandn : PatFrag<(ops node:$i0, node:$i1), (and (vnot node:$i0), node:$i1)>;
-def vxnor : PatFrag<(ops node:$i0, node:$i1), (vnot (xor node:$i0, node:$i1))>;
-
 // TODO - do we need a X86SchedWriteWidths::KMASK type?
 defm KAND  : avx512_mask_binop_all<0x41, "kand",  and,     SchedWriteVecLogic.XMM, 1>;
 defm KOR   : avx512_mask_binop_all<0x45, "kor",   or,      SchedWriteVecLogic.XMM, 1>;
@@ -9880,19 +9615,6 @@ defm : avx512_masked_scalar<fsqrt, "SQRTSDZ", X86Movsd,
 // Integer truncate and extend operations
 //-------------------------------------------------
 
-// PatFrags that contain a select and a truncate op. They take operands in the
-// same order as X86vmtrunc, X86vmtruncs, X86vmtruncus. This allows us to pass
-// either to the multiclasses.
-def select_trunc : PatFrag<(ops node:$src, node:$src0, node:$mask),
-                           (vselect_mask node:$mask,
-                                         (trunc node:$src), node:$src0)>;
-def select_truncs : PatFrag<(ops node:$src, node:$src0, node:$mask),
-                            (vselect_mask node:$mask,
-                                          (X86vtruncs node:$src), node:$src0)>;
-def select_truncus : PatFrag<(ops node:$src, node:$src0, node:$mask),
-                             (vselect_mask node:$mask,
-                                           (X86vtruncus node:$src), node:$src0)>;
-
 multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
                               SDPatternOperator MaskNode,
                               X86FoldableSchedWrite sched, X86VectorVTInfo SrcInfo,
@@ -12676,11 +12398,6 @@ defm VPOPCNTW : avx512_unary_rm_vl<0x54, "vpopcntw", ctpop, SchedWriteVecALU,
 defm : avx512_unary_lowering<"VPOPCNTB", ctpop, avx512vl_i8_info, HasBITALG>;
 defm : avx512_unary_lowering<"VPOPCNTW", ctpop, avx512vl_i16_info, HasBITALG>;
 
-def X86Vpshufbitqmb_su : PatFrag<(ops node:$src1, node:$src2),
-                                 (X86Vpshufbitqmb node:$src1, node:$src2), [{
-  return N->hasOneUse();
-}]>;
-
 multiclass VPSHUFBITQMB_rm<X86FoldableSchedWrite sched, X86VectorVTInfo VTI> {
   defm rr : AVX512_maskable_cmp<0x8F, MRMSrcReg, VTI, (outs VTI.KRC:$dst),
                                 (ins VTI.RC:$src1, VTI.RC:$src2),
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
index 56cbc13eaaec8..8c355e84a0659 100644
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -48,16 +48,6 @@ def PLEA64r   : PseudoI<(outs GR64:$dst), (ins anymem:$src), []>;
 //  Fixed-Register Multiplication and Division Instructions.
 //
 
-// SchedModel info for instruction that loads one value and gets the second
-// (and possibly third) value from a register.
-// This is used for instructions that put the memory operands before other
-// uses.
-class SchedLoadReg<X86FoldableSchedWrite Sched> : Sched<[Sched.Folded,
-  // Memory operand.
-  ReadDefault, ReadDefault, ReadDefault, ReadDefault, ReadDefault,
-  // Register reads (implicit or explicit).
-  Sched.ReadAfterFold, Sched.ReadAfterFold]>;
-
 // BinOpRR - Binary instructions with inputs "reg, reg".
 class BinOpRR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
               dag outlist, X86FoldableSchedWrite sched, list<dag> pattern>
@@ -506,17 +496,6 @@ class IMulOpRMI<bits<8> opcode, string mnemonic, X86TypeInfo info,
   let ImmT = info.ImmEncoding;
 }
 
-def X86add_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
-                               (X86add_flag node:$lhs, node:$rhs), [{
-  return hasNoCarryFlagUses(SDValue(N, 1));
-}]>;
-
-def X86sub_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
-                               (X86sub_flag node:$lhs, node:$rhs), [{
-  // Only use DEC if the result is used.
-  return !SDValue(N, 0).use_empty() && hasNoCarryFlagUses(SDValue(N, 1));
-}]>;
-
 let Defs = [EFLAGS] in {
 let Constraints = "$src1 = $dst", SchedRW = [WriteALU] in {
 // Short forms only valid in 32-bit mode. Selected during MCInst lowering.
@@ -1221,9 +1200,6 @@ def : Pat<(store (X86adc_flag i64relocImmSExt32_su:$src, (load addr:$dst), EFLAG
 // generate a result.  From an encoding perspective, they are very different:
 // they don't have all the usual imm8 and REV forms, and are encoded into a
 // different space.
-def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
-                         (X86cmp (and_su node:$lhs, node:$rhs), 0)>;
-
 let isCompare = 1 in {
   let Defs = [EFLAGS] in {
     let isCommutable = 1 in {
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 9e99dbd6fe852..457833f8cc331 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -786,16 +786,6 @@ defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
 defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
 defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
 
-def X86lock_add_nocf : P...
[truncated]

``````````
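
Nearly every removed block in the diff above is one of two TableGen idioms,
now centralized by this patch. The first is the `_su` ("single use") PatFrag
wrapper; below is a restatement of the `X86Vfpclass_su` definition from the
diff, with explanatory comments added here (the comments are not part of the
patch):

```tablegen
// A PatFrag wraps an existing SelectionDAG node with a C++ predicate;
// this fragment only matches when the node has a single use, so ISel
// will not fold a value that other patterns still need.
def X86Vfpclass_su : PatFrag<(ops node:$src1, node:$src2),
                             (X86Vfpclass node:$src1, node:$src2), [{
  return N->hasOneUse();
}]>;
```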

</details>
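
The second recurring idiom is SDNodeXForm, as in the `X86pcmpm_imm`
definition being moved; restated below with explanatory comments added
(the code itself is verbatim from the diff):

```tablegen
// An SDNodeXForm runs after a pattern matches; this one maps the SETCC
// condition code of the matched node to the 8-bit immediate that VPCMP
// encodes, via the existing X86::getVPCMPImmForCond helper.
def X86pcmpm_imm : SDNodeXForm<setcc, [{
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  uint8_t SSECC = X86::getVPCMPImmForCond(CC);
  return getI8Imm(SSECC, SDLoc(N));
}]>;
```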


https://github.com/llvm/llvm-project/pull/74454
