[llvm] c81a121 - Revert "Revert "[X86] Remove patterns for ADC/SBB with immediate 8 and optimize during MC lowering, NFCI""
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Fri May 19 07:25:18 PDT 2023
Author: Shengchen Kan
Date: 2023-05-19T22:21:56+08:00
New Revision: c81a121f3f230cfe468b6def6d2904b4aefb855b
URL: https://github.com/llvm/llvm-project/commit/c81a121f3f230cfe468b6def6d2904b4aefb855b
DIFF: https://github.com/llvm/llvm-project/commit/c81a121f3f230cfe468b6def6d2904b4aefb855b.diff
LOG: Revert "Revert "[X86] Remove patterns for ADC/SBB with immediate 8 and optimize during MC lowering, NFCI""
This reverts commit cb16b33a03aff70b2499c3452f2f817f3f92d20d.
In fact, the test in https://bugs.chromium.org/p/chromium/issues/detail?id=1446973#c2
already passes after commit 5586bc539acb26cb94e461438de01a5080513401
Added:
Modified:
llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
llvm/lib/Target/X86/X86CallFrameOptimization.cpp
llvm/lib/Target/X86/X86DynAllocaExpander.cpp
llvm/lib/Target/X86/X86FastISel.cpp
llvm/lib/Target/X86/X86FixupLEAs.cpp
llvm/lib/Target/X86/X86FrameLowering.cpp
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86InstrAVX512.td
llvm/lib/Target/X86/X86InstrArithmetic.td
llvm/lib/Target/X86/X86InstrCompiler.td
llvm/lib/Target/X86/X86InstrInfo.cpp
llvm/lib/Target/X86/X86InstructionSelector.cpp
llvm/lib/Target/X86/X86MCInstLower.cpp
llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll
llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir
llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir
llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
llvm/test/CodeGen/X86/GlobalISel/select-ext.mir
llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir
llvm/test/CodeGen/X86/avxvnni-combine.ll
llvm/test/CodeGen/X86/cfi-xmm.ll
llvm/test/CodeGen/X86/extend-set-cc-uses-dbg.ll
llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
llvm/test/CodeGen/X86/leaFixup32.mir
llvm/test/CodeGen/X86/leaFixup64.mir
llvm/test/CodeGen/X86/limit-split-cost.mir
llvm/test/CodeGen/X86/machinesink-debug-inv-0.mir
llvm/test/CodeGen/X86/optimize-compare.mir
llvm/test/CodeGen/X86/peephole-fold-testrr.mir
llvm/test/CodeGen/X86/pr46827.ll
llvm/test/CodeGen/X86/push-cfi.ll
llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
llvm/test/CodeGen/X86/switch-bit-test-unreachable-default.ll
llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll
llvm/test/CodeGen/X86/tail-call-conditional.mir
llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
llvm/test/CodeGen/X86/throws-cfi-fp.ll
llvm/test/CodeGen/X86/twoaddr-dbg-value.mir
llvm/test/CodeGen/X86/update-terminator-debugloc.ll
llvm/test/CodeGen/X86/vecloadextract.ll
llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
llvm/test/DebugInfo/MIR/InstrRef/twoaddr-to-threeaddr-sub.mir
llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup-2.mir
llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup.mir
llvm/test/DebugInfo/MIR/X86/empty-inline.mir
llvm/test/DebugInfo/MIR/X86/kill-after-spill.mir
llvm/test/DebugInfo/MIR/X86/live-debug-values-stack-clobber.mir
llvm/test/DebugInfo/MIR/X86/machinesink-subreg.mir
llvm/test/DebugInfo/MIR/X86/machinesink.mir
llvm/test/DebugInfo/MIR/X86/merge-inline-loc4.mir
llvm/test/DebugInfo/MIR/X86/mlicm-hoist-post-regalloc.mir
llvm/test/DebugInfo/X86/debug-loc-asan.mir
llvm/test/DebugInfo/X86/debug-loc-offset.mir
llvm/test/DebugInfo/X86/location-range.mir
llvm/test/DebugInfo/X86/machinecse-wrongdebug-hoist.ll
llvm/test/DebugInfo/X86/sdag-dbgvalue-ssareg.ll
llvm/test/DebugInfo/assignment-tracking/X86/lower-to-value.ll
llvm/test/TableGen/x86-fold-tables.inc
llvm/test/Transforms/SampleProfile/pseudo-probe-twoaddr.ll
llvm/utils/TableGen/X86ManualFoldTables.def
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
index 69f65841d7b5..8c886b6ec85c 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
@@ -370,7 +370,7 @@ bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with
/// a short fixed-register form.
-bool X86::optimizeToFixedRegisterForm(MCInst &MI) {
+static bool optimizeToFixedRegisterForm(MCInst &MI) {
unsigned NewOpc;
switch (MI.getOpcode()) {
default:
@@ -424,3 +424,77 @@ bool X86::optimizeToFixedRegisterForm(MCInst &MI) {
MI.addOperand(Saved);
return true;
}
+
+static bool optimizeToShortImmediateForm(MCInst &MI) {
+ unsigned NewOpc;
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ FROM_TO(ADC16mi, ADC16mi8)
+ FROM_TO(ADC16ri, ADC16ri8)
+ FROM_TO(ADC32mi, ADC32mi8)
+ FROM_TO(ADC32ri, ADC32ri8)
+ FROM_TO(ADC64mi32, ADC64mi8)
+ FROM_TO(ADC64ri32, ADC64ri8)
+ FROM_TO(SBB16mi, SBB16mi8)
+ FROM_TO(SBB16ri, SBB16ri8)
+ FROM_TO(SBB32mi, SBB32mi8)
+ FROM_TO(SBB32ri, SBB32ri8)
+ FROM_TO(SBB64mi32, SBB64mi8)
+ FROM_TO(SBB64ri32, SBB64ri8)
+ FROM_TO(ADD16mi, ADD16mi8)
+ FROM_TO(ADD16ri, ADD16ri8)
+ FROM_TO(ADD32mi, ADD32mi8)
+ FROM_TO(ADD32ri, ADD32ri8)
+ FROM_TO(ADD64mi32, ADD64mi8)
+ FROM_TO(ADD64ri32, ADD64ri8)
+ FROM_TO(AND16mi, AND16mi8)
+ FROM_TO(AND16ri, AND16ri8)
+ FROM_TO(AND32mi, AND32mi8)
+ FROM_TO(AND32ri, AND32ri8)
+ FROM_TO(AND64mi32, AND64mi8)
+ FROM_TO(AND64ri32, AND64ri8)
+ FROM_TO(OR16mi, OR16mi8)
+ FROM_TO(OR16ri, OR16ri8)
+ FROM_TO(OR32mi, OR32mi8)
+ FROM_TO(OR32ri, OR32ri8)
+ FROM_TO(OR64mi32, OR64mi8)
+ FROM_TO(OR64ri32, OR64ri8)
+ FROM_TO(SUB16mi, SUB16mi8)
+ FROM_TO(SUB16ri, SUB16ri8)
+ FROM_TO(SUB32mi, SUB32mi8)
+ FROM_TO(SUB32ri, SUB32ri8)
+ FROM_TO(SUB64mi32, SUB64mi8)
+ FROM_TO(SUB64ri32, SUB64ri8)
+ FROM_TO(XOR16mi, XOR16mi8)
+ FROM_TO(XOR16ri, XOR16ri8)
+ FROM_TO(XOR32mi, XOR32mi8)
+ FROM_TO(XOR32ri, XOR32ri8)
+ FROM_TO(XOR64mi32, XOR64mi8)
+ FROM_TO(XOR64ri32, XOR64ri8)
+ FROM_TO(CMP16mi, CMP16mi8)
+ FROM_TO(CMP16ri, CMP16ri8)
+ FROM_TO(CMP32mi, CMP32mi8)
+ FROM_TO(CMP32ri, CMP32ri8)
+ FROM_TO(CMP64mi32, CMP64mi8)
+ FROM_TO(CMP64ri32, CMP64ri8)
+ }
+ MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
+ if (LastOp.isExpr()) {
+ const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(LastOp.getExpr());
+ if (!SRE || SRE->getKind() != MCSymbolRefExpr::VK_X86_ABS8)
+ return false;
+ } else if (LastOp.isImm()) {
+ if (!isInt<8>(LastOp.getImm()))
+ return false;
+ }
+ MI.setOpcode(NewOpc);
+ return true;
+}
+
+bool X86::optimizeToFixedRegisterOrShortImmediateForm(MCInst &MI) {
+ // We may optimize twice here.
+ bool ShortImm = optimizeToShortImmediateForm(MI);
+ bool FixedReg = optimizeToFixedRegisterForm(MI);
+ return ShortImm || FixedReg;
+}
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
index 7d0c31751e84..e99cfdc09391 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
@@ -22,7 +22,7 @@ bool optimizeVPCMPWithImmediateOneOrSix(MCInst &MI);
bool optimizeMOVSX(MCInst &MI);
bool optimizeINCDEC(MCInst &MI, bool In64BitMode);
bool optimizeMOV(MCInst &MI, bool In64BitMode);
-bool optimizeToFixedRegisterForm(MCInst &MI);
+bool optimizeToFixedRegisterOrShortImmediateForm(MCInst &MI);
} // namespace X86
} // namespace llvm
#endif
diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
index 1fa559dcf2bd..1367d2e3d2ba 100644
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -285,15 +285,15 @@ X86CallFrameOptimization::classifyInstruction(
// The instructions we actually care about are movs onto the stack or special
// cases of constant-stores to stack
switch (MI->getOpcode()) {
- case X86::AND16mi8:
- case X86::AND32mi8:
- case X86::AND64mi8: {
+ case X86::AND16mi:
+ case X86::AND32mi:
+ case X86::AND64mi32: {
const MachineOperand &ImmOp = MI->getOperand(X86::AddrNumOperands);
return ImmOp.getImm() == 0 ? Convert : Exit;
}
- case X86::OR16mi8:
- case X86::OR32mi8:
- case X86::OR64mi8: {
+ case X86::OR16mi:
+ case X86::OR32mi:
+ case X86::OR64mi32: {
const MachineOperand &ImmOp = MI->getOperand(X86::AddrNumOperands);
return ImmOp.getImm() == -1 ? Convert : Exit;
}
@@ -512,12 +512,12 @@ void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
switch (Store->getOpcode()) {
default:
llvm_unreachable("Unexpected Opcode!");
- case X86::AND16mi8:
- case X86::AND32mi8:
- case X86::AND64mi8:
- case X86::OR16mi8:
- case X86::OR32mi8:
- case X86::OR64mi8:
+ case X86::AND16mi:
+ case X86::AND32mi:
+ case X86::AND64mi32:
+ case X86::OR16mi:
+ case X86::OR32mi:
+ case X86::OR64mi32:
case X86::MOV32mi:
case X86::MOV64mi32:
PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
diff --git a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
index 8f237ee386b5..bd2a663f893a 100644
--- a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
+++ b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
@@ -189,10 +189,10 @@ void X86DynAllocaExpander::computeLowerings(MachineFunction &MF,
}
}
-static unsigned getSubOpcode(bool Is64Bit, int64_t Amount) {
+static unsigned getSubOpcode(bool Is64Bit) {
if (Is64Bit)
- return isInt<8>(Amount) ? X86::SUB64ri8 : X86::SUB64ri32;
- return isInt<8>(Amount) ? X86::SUB32ri8 : X86::SUB32ri;
+ return X86::SUB64ri32;
+ return X86::SUB32ri;
}
void X86DynAllocaExpander::lower(MachineInstr *MI, Lowering L) {
@@ -242,8 +242,7 @@ void X86DynAllocaExpander::lower(MachineInstr *MI, Lowering L) {
.addReg(RegA, RegState::Undef);
} else {
// Sub.
- BuildMI(*MBB, I, DL,
- TII->get(getSubOpcode(Is64BitAlloca, Amount)), StackPtr)
+ BuildMI(*MBB, I, DL, TII->get(getSubOpcode(Is64BitAlloca)), StackPtr)
.addReg(StackPtr)
.addImm(Amount);
}
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index e414a3d717bc..ff90b402b9b9 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1376,7 +1376,6 @@ static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
/// If we have a comparison with RHS as the RHS of the comparison, return an
/// opcode that works for the compare (e.g. CMP32ri) otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
- int64_t Val = RHSC->getSExtValue();
switch (VT.getSimpleVT().SimpleTy) {
// Otherwise, we can't fold the immediate into this comparison.
default:
@@ -1384,21 +1383,13 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
case MVT::i8:
return X86::CMP8ri;
case MVT::i16:
- if (isInt<8>(Val))
- return X86::CMP16ri8;
return X86::CMP16ri;
case MVT::i32:
- if (isInt<8>(Val))
- return X86::CMP32ri8;
return X86::CMP32ri;
case MVT::i64:
- if (isInt<8>(Val))
- return X86::CMP64ri8;
// 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
// field.
- if (isInt<32>(Val))
- return X86::CMP64ri32;
- return 0;
+ return isInt<32>(RHSC->getSExtValue()) ? X86::CMP64ri32 : 0;
}
}
diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index 9137c15316df..c702c015d7b3 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -186,13 +186,9 @@ FixupLEAPass::postRAConvertToLEA(MachineBasicBlock &MBB,
// Only convert instructions that we've verified are safe.
return nullptr;
case X86::ADD64ri32:
- case X86::ADD64ri8:
case X86::ADD64ri32_DB:
- case X86::ADD64ri8_DB:
case X86::ADD32ri:
- case X86::ADD32ri8:
case X86::ADD32ri_DB:
- case X86::ADD32ri8_DB:
if (!MI.getOperand(2).isImm()) {
// convertToThreeAddress will call getImm()
// which requires isImm() to be true
@@ -374,15 +370,14 @@ static inline unsigned getSUBrrFromLEA(unsigned LEAOpcode) {
static inline unsigned getADDriFromLEA(unsigned LEAOpcode,
const MachineOperand &Offset) {
- bool IsInt8 = Offset.isImm() && isInt<8>(Offset.getImm());
switch (LEAOpcode) {
default:
llvm_unreachable("Unexpected LEA instruction");
case X86::LEA32r:
case X86::LEA64_32r:
- return IsInt8 ? X86::ADD32ri8 : X86::ADD32ri;
+ return X86::ADD32ri;
case X86::LEA64r:
- return IsInt8 ? X86::ADD64ri8 : X86::ADD64ri32;
+ return X86::ADD64ri32;
}
}
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index f512de1ac3ac..fed7131b0299 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -105,28 +105,12 @@ bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
(isWin64Prologue(MF) && MFI.hasCopyImplyingStackAdjustment()));
}
-static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
- if (IsLP64) {
- if (isInt<8>(Imm))
- return X86::SUB64ri8;
- return X86::SUB64ri32;
- } else {
- if (isInt<8>(Imm))
- return X86::SUB32ri8;
- return X86::SUB32ri;
- }
+static unsigned getSUBriOpcode(bool IsLP64) {
+ return IsLP64 ? X86::SUB64ri32 : X86::SUB32ri;
}
-static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) {
- if (IsLP64) {
- if (isInt<8>(Imm))
- return X86::ADD64ri8;
- return X86::ADD64ri32;
- } else {
- if (isInt<8>(Imm))
- return X86::ADD32ri8;
- return X86::ADD32ri;
- }
+static unsigned getADDriOpcode(bool IsLP64) {
+ return IsLP64 ? X86::ADD64ri32 : X86::ADD32ri;
}
static unsigned getSUBrrOpcode(bool IsLP64) {
@@ -138,14 +122,7 @@ static unsigned getADDrrOpcode(bool IsLP64) {
}
static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
- if (IsLP64) {
- if (isInt<8>(Imm))
- return X86::AND64ri8;
- return X86::AND64ri32;
- }
- if (isInt<8>(Imm))
- return X86::AND32ri8;
- return X86::AND32ri;
+ return IsLP64 ? X86::AND64ri32 : X86::AND32ri;
}
static unsigned getLEArOpcode(bool IsLP64) {
@@ -363,8 +340,8 @@ MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
} else {
bool IsSub = Offset < 0;
uint64_t AbsOffset = IsSub ? -Offset : Offset;
- const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
- : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
+ const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr)
+ : getADDriOpcode(Uses64BitFramePtr);
MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
.addReg(StackPtr)
.addImm(AbsOffset);
@@ -400,9 +377,8 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
unsigned Opc = PI->getOpcode();
int Offset = 0;
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
- PI->getOperand(0).getReg() == StackPtr){
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD32ri) &&
+ PI->getOperand(0).getReg() == StackPtr) {
assert(PI->getOperand(1).getReg() == StackPtr);
Offset = PI->getOperand(2).getImm();
} else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
@@ -413,8 +389,7 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
PI->getOperand(5).getReg() == X86::NoRegister) {
// For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
Offset = PI->getOperand(4).getImm();
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB32ri) &&
PI->getOperand(0).getReg() == StackPtr) {
assert(PI->getOperand(1).getReg() == StackPtr);
Offset = -PI->getOperand(2).getImm();
@@ -833,7 +808,7 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop(
// save loop bound
{
const unsigned BoundOffset = alignDown(Offset, StackProbeSize);
- const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, BoundOffset);
+ const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr);
BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
.addReg(FinalStackProbed)
.addImm(BoundOffset)
@@ -1336,7 +1311,7 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
{
const unsigned SUBOpc =
- getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
+ getSUBriOpcode(Uses64BitFramePtr);
BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
.addReg(StackPtr)
.addImm(StackProbeSize)
@@ -1367,7 +1342,7 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
.setMIFlag(MachineInstr::FrameSetup);
const unsigned SUBOpc =
- getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
+ getSUBriOpcode(Uses64BitFramePtr);
BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
.addReg(StackPtr)
.addImm(StackProbeSize)
@@ -1800,7 +1775,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
.addImm(8)
.addUse(X86::NoRegister)
.setMIFlag(MachineInstr::FrameSetup);
- BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri32), X86::RSP)
.addUse(X86::RSP)
.addImm(8)
.setMIFlag(MachineInstr::FrameSetup);
@@ -2419,7 +2394,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
(Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
(Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) &&
- (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)))
+ (Opc != X86::ADD64ri32 || !PI->getFlag(MachineInstr::FrameDestroy)))
break;
FirstCSPop = PI;
}
@@ -3793,7 +3768,7 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
if (UsedReg == FramePtr) {
// ADD $offset, %ebp
- unsigned ADDri = getADDriOpcode(false, EndOffset);
+ unsigned ADDri = getADDriOpcode(false);
BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
.addReg(FramePtr)
.addImm(EndOffset)
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index e2149951aff0..bedeb783bcf6 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -3400,26 +3400,6 @@ bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
llvm_unreachable("Invalid opcode!");
}
};
- auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
- switch (Opc) {
- case X86ISD::ADD:
- return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
- case X86ISD::ADC:
- return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
- case X86ISD::SUB:
- return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
- case X86ISD::SBB:
- return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
- case X86ISD::AND:
- return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
- case X86ISD::OR:
- return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
- case X86ISD::XOR:
- return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
- default:
- llvm_unreachable("Invalid opcode!");
- }
- };
auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
switch (Opc) {
case X86ISD::ADD:
@@ -3468,12 +3448,7 @@ bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
}
- // First try to fit this into an Imm8 operand. If it doesn't fit, then try
- // the larger immediate operand.
- if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
- Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
- NewOpc = SelectImm8Opcode(Opc);
- } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
+ if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
NewOpc = SelectImmOpcode(Opc);
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8201ce157a00..eeccb6045586 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28916,7 +28916,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SDValue Chain = Op->getOperand(0);
SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
SDValue Result =
- SDValue(DAG.getMachineNode(X86::SUB64ri8, dl, MVT::i64, CopyRBP,
+ SDValue(DAG.getMachineNode(X86::SUB64ri32, dl, MVT::i64, CopyRBP,
DAG.getTargetConstant(8, dl, MVT::i32)),
0);
// Return { result, chain }.
@@ -36847,16 +36847,11 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
return SinkMBB;
}
-static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
- if (IsLP64) {
- if (isInt<8>(Imm))
- return X86::SUB64ri8;
+static unsigned getSUBriOpcode(bool IsLP64) {
+ if (IsLP64)
return X86::SUB64ri32;
- } else {
- if (isInt<8>(Imm))
- return X86::SUB32ri8;
+ else
return X86::SUB32ri;
- }
}
MachineBasicBlock *
@@ -36924,12 +36919,12 @@ X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
// The property we want to enforce is to never have more than [page alloc] between two probes.
const unsigned XORMIOpc =
- TFI.Uses64BitFramePtr ? X86::XOR64mi8 : X86::XOR32mi8;
+ TFI.Uses64BitFramePtr ? X86::XOR64mi32 : X86::XOR32mi;
addRegOffset(BuildMI(blockMBB, DL, TII->get(XORMIOpc)), physSPReg, false, 0)
.addImm(0);
BuildMI(blockMBB, DL,
- TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr, ProbeSize)), physSPReg)
+ TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr)), physSPReg)
.addReg(physSPReg)
.addImm(ProbeSize);
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index d20f00466a34..f5d20bb4fe40 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -2990,7 +2990,7 @@ let Predicates = [HasAVX512] in {
def : Pat<(insert_subvector (v16i1 immAllZerosV),
(v1i1 (scalar_to_vector GR8:$src)), (iPTR 0)),
- (KMOVWkr (AND32ri8
+ (KMOVWkr (AND32ri
(INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, sub_8bit),
(i32 1)))>;
}
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
index c4e4eb333882..fa92e58347ab 100644
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -235,30 +235,17 @@ class BinOpRI8<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
let ImmT = Imm8; // Always 8-bit immediate.
}
-// BinOpRI8_F - Binary instructions with inputs "reg, imm8", where the pattern
-// has EFLAGS as a result.
-class BinOpRI8_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDPatternOperator opnode, Format f>
- : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs), WriteALU,
- [(set EFLAGS,
- (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
+// BinOpRI8_F - Binary instructions with inputs "reg, imm8".
+class BinOpRI8_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo, Format f>
+ : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs), WriteALU, []>;
-// BinOpRI8_RF - Binary instructions with inputs "reg, imm8", where the pattern
-// has both a regclass and EFLAGS as a result.
-class BinOpRI8_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDPatternOperator opnode, Format f>
- : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst), WriteALU,
- [(set typeinfo.RegClass:$dst, EFLAGS,
- (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2))]>;
+// BinOpRI8_RF - Binary instructions with inputs "reg, imm8".
+class BinOpRI8_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo, Format f>
+ : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst), WriteALU, []>;
-// BinOpRI8_RFF - Binary instructions with inputs "reg, imm8", where the pattern
-// has both a regclass and EFLAGS as a result, and has EFLAGS as input.
-class BinOpRI8_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- SDPatternOperator opnode, Format f>
- : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst), WriteADC,
- [(set typeinfo.RegClass:$dst, EFLAGS,
- (opnode typeinfo.RegClass:$src1, typeinfo.Imm8Operator:$src2,
- EFLAGS))]>;
+// BinOpRI8_RFF - Binary instructions with inputs "reg, imm8".
+class BinOpRI8_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo, Format f>
+ : BinOpRI8<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst), WriteADC, []>;
// BinOpMR - Binary instructions with inputs "[mem], reg".
class BinOpMR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
@@ -352,34 +339,17 @@ class BinOpMI8<string mnemonic, X86TypeInfo typeinfo,
let ImmT = Imm8; // Always 8-bit immediate.
}
-// BinOpMI8_RMW - Binary instructions with inputs "[mem], imm8", where the
-// pattern implicitly use EFLAGS.
-class BinOpMI8_RMW<string mnemonic, X86TypeInfo typeinfo,
- SDPatternOperator opnode, Format f>
- : BinOpMI8<mnemonic, typeinfo, f,
- [(store (opnode (load addr:$dst),
- typeinfo.Imm8Operator:$src), addr:$dst),
- (implicit EFLAGS)]>,
- Sched<[WriteALURMW]>;
+// BinOpMI8_RMW - Binary instructions with inputs "[mem], imm8".
+class BinOpMI8_RMW<string mnemonic, X86TypeInfo typeinfo, Format f>
+ : BinOpMI8<mnemonic, typeinfo, f, []>, Sched<[WriteALURMW]>;
-// BinOpMI8_RMW_FF - Binary instructions with inputs "[mem], imm8", where the
-// pattern sets EFLAGS and implicitly uses EFLAGS.
-class BinOpMI8_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
- SDPatternOperator opnode, Format f>
- : BinOpMI8<mnemonic, typeinfo, f,
- [(store (opnode (load addr:$dst),
- typeinfo.Imm8Operator:$src, EFLAGS), addr:$dst),
- (implicit EFLAGS)]>,
- Sched<[WriteADCRMW]>;
+// BinOpMI8_RMW_FF - Binary instructions with inputs "[mem], imm8".
+class BinOpMI8_RMW_FF<string mnemonic, X86TypeInfo typeinfo, Format f>
+ : BinOpMI8<mnemonic, typeinfo, f, []>, Sched<[WriteADCRMW]>;
-// BinOpMI8_F - Binary instructions with inputs "[mem], imm8", where the pattern
-// has EFLAGS as a result.
-class BinOpMI8_F<string mnemonic, X86TypeInfo typeinfo,
- SDPatternOperator opnode, Format f>
- : BinOpMI8<mnemonic, typeinfo, f,
- [(set EFLAGS, (opnode (typeinfo.LoadNode addr:$dst),
- typeinfo.Imm8Operator:$src))]>,
- Sched<[WriteALU.Folded]>;
+// BinOpMI8_F - Binary instructions with inputs "[mem], imm8"
+class BinOpMI8_F<string mnemonic, X86TypeInfo typeinfo, Format f>
+ : BinOpMI8<mnemonic, typeinfo, f, []>, Sched<[WriteALU.Folded]>;
// BinOpAI - Binary instructions with input imm, that implicitly use A reg and
// implicitly define Areg and EFLAGS.
@@ -887,14 +857,14 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#32rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi32, opnodeflag>;
def NAME#64rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi64, opnodeflag>;
- let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress, hasSideEffects= 0 in {
def NAME#8ri : BinOpRI_RF<0x80, mnemonic, Xi8 , opnodeflag, RegMRM>;
// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.
- def NAME#16ri8 : BinOpRI8_RF<0x82, mnemonic, Xi16, opnodeflag, RegMRM>;
- def NAME#32ri8 : BinOpRI8_RF<0x82, mnemonic, Xi32, opnodeflag, RegMRM>;
- def NAME#64ri8 : BinOpRI8_RF<0x82, mnemonic, Xi64, opnodeflag, RegMRM>;
+ def NAME#16ri8 : BinOpRI8_RF<0x82, mnemonic, Xi16, RegMRM>;
+ def NAME#32ri8 : BinOpRI8_RF<0x82, mnemonic, Xi32, RegMRM>;
+ def NAME#64ri8 : BinOpRI8_RF<0x82, mnemonic, Xi64, RegMRM>;
def NAME#16ri : BinOpRI_RF<0x80, mnemonic, Xi16, opnodeflag, RegMRM>;
def NAME#32ri : BinOpRI_RF<0x80, mnemonic, Xi32, opnodeflag, RegMRM>;
@@ -902,34 +872,34 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
}
} // Constraints = "$src1 = $dst"
- let mayLoad = 1, mayStore = 1 in {
+ let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
def NAME#8mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi8 , opnode>;
def NAME#16mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi16, opnode>;
def NAME#32mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi32, opnode>;
def NAME#64mr : BinOpMR_RMW<BaseOpc, mnemonic, Xi64, opnode>;
- }
- // NOTE: These are order specific, we want the mi8 forms to be listed
- // first so that they are slightly preferred to the mi forms.
- def NAME#16mi8 : BinOpMI8_RMW<mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi8 : BinOpMI8_RMW<mnemonic, Xi32, opnode, MemMRM>;
- let Predicates = [In64BitMode] in
- def NAME#64mi8 : BinOpMI8_RMW<mnemonic, Xi64, opnode, MemMRM>;
-
- def NAME#8mi : BinOpMI_RMW<0x80, mnemonic, Xi8 , opnode, MemMRM>;
- def NAME#16mi : BinOpMI_RMW<0x80, mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi : BinOpMI_RMW<0x80, mnemonic, Xi32, opnode, MemMRM>;
- let Predicates = [In64BitMode] in
- def NAME#64mi32 : BinOpMI_RMW<0x80, mnemonic, Xi64, opnode, MemMRM>;
+ // NOTE: These are order specific, we want the mi8 forms to be listed
+ // first so that they are slightly preferred to the mi forms.
+ def NAME#16mi8 : BinOpMI8_RMW<mnemonic, Xi16, MemMRM>;
+ def NAME#32mi8 : BinOpMI8_RMW<mnemonic, Xi32, MemMRM>;
+ let Predicates = [In64BitMode] in
+ def NAME#64mi8 : BinOpMI8_RMW<mnemonic, Xi64, MemMRM>;
+
+ def NAME#8mi : BinOpMI_RMW<0x80, mnemonic, Xi8 , opnode, MemMRM>;
+ def NAME#16mi : BinOpMI_RMW<0x80, mnemonic, Xi16, opnode, MemMRM>;
+ def NAME#32mi : BinOpMI_RMW<0x80, mnemonic, Xi32, opnode, MemMRM>;
+ let Predicates = [In64BitMode] in
+ def NAME#64mi32 : BinOpMI_RMW<0x80, mnemonic, Xi64, opnode, MemMRM>;
+ }
// These are for the disassembler since 0x82 opcode behaves like 0x80, but
// not in 64-bit mode.
let Predicates = [Not64BitMode], isCodeGenOnly = 1, ForceDisassemble = 1,
hasSideEffects = 0 in {
let Constraints = "$src1 = $dst" in
- def NAME#8ri8 : BinOpRI8_RF<0x82, mnemonic, Xi8, null_frag, RegMRM>;
+ def NAME#8ri8 : BinOpRI8_RF<0x82, mnemonic, Xi8, RegMRM>;
let mayLoad = 1, mayStore = 1 in
- def NAME#8mi8 : BinOpMI8_RMW<mnemonic, Xi8, null_frag, MemMRM>;
+ def NAME#8mi8 : BinOpMI8_RMW<mnemonic, Xi8, MemMRM>;
}
} // Defs = [EFLAGS]
@@ -976,12 +946,12 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#8ri : BinOpRI_RFF<0x80, mnemonic, Xi8 , opnode, RegMRM>;
- let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress, hasSideEffects = 0 in {
// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.
- def NAME#16ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi16, opnode, RegMRM>;
- def NAME#32ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi32, opnode, RegMRM>;
- def NAME#64ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi64, opnode, RegMRM>;
+ def NAME#16ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi16, RegMRM>;
+ def NAME#32ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi32, RegMRM>;
+ def NAME#64ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi64, RegMRM>;
def NAME#16ri : BinOpRI_RFF<0x80, mnemonic, Xi16, opnode, RegMRM>;
def NAME#32ri : BinOpRI_RFF<0x80, mnemonic, Xi32, opnode, RegMRM>;
@@ -996,25 +966,27 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
// NOTE: These are order specific, we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
- def NAME#16mi8 : BinOpMI8_RMW_FF<mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi8 : BinOpMI8_RMW_FF<mnemonic, Xi32, opnode, MemMRM>;
+ let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
+ def NAME#16mi8 : BinOpMI8_RMW_FF<mnemonic, Xi16, MemMRM>;
+ def NAME#32mi8 : BinOpMI8_RMW_FF<mnemonic, Xi32, MemMRM>;
let Predicates = [In64BitMode] in
- def NAME#64mi8 : BinOpMI8_RMW_FF<mnemonic, Xi64, opnode, MemMRM>;
+ def NAME#64mi8 : BinOpMI8_RMW_FF<mnemonic, Xi64, MemMRM>;
def NAME#8mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi8 , opnode, MemMRM>;
def NAME#16mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi16, opnode, MemMRM>;
def NAME#32mi : BinOpMI_RMW_FF<0x80, mnemonic, Xi32, opnode, MemMRM>;
let Predicates = [In64BitMode] in
def NAME#64mi32 : BinOpMI_RMW_FF<0x80, mnemonic, Xi64, opnode, MemMRM>;
+ }
// These are for the disassembler since 0x82 opcode behaves like 0x80, but
// not in 64-bit mode.
let Predicates = [Not64BitMode], isCodeGenOnly = 1, ForceDisassemble = 1,
hasSideEffects = 0 in {
let Constraints = "$src1 = $dst" in
- def NAME#8ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi8, null_frag, RegMRM>;
+ def NAME#8ri8 : BinOpRI8_RFF<0x82, mnemonic, Xi8, RegMRM>;
let mayLoad = 1, mayStore = 1 in
- def NAME#8mi8 : BinOpMI8_RMW_FF<mnemonic, Xi8, null_frag, MemMRM>;
+ def NAME#8mi8 : BinOpMI8_RMW_FF<mnemonic, Xi8, MemMRM>;
}
} // Uses = [EFLAGS], Defs = [EFLAGS]
@@ -1058,12 +1030,12 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
def NAME#8ri : BinOpRI_F<0x80, mnemonic, Xi8 , opnode, RegMRM>;
- let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddress, hasSideEffects = 0 in {
// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.
- def NAME#16ri8 : BinOpRI8_F<0x82, mnemonic, Xi16, opnode, RegMRM>;
- def NAME#32ri8 : BinOpRI8_F<0x82, mnemonic, Xi32, opnode, RegMRM>;
- def NAME#64ri8 : BinOpRI8_F<0x82, mnemonic, Xi64, opnode, RegMRM>;
+ def NAME#16ri8 : BinOpRI8_F<0x82, mnemonic, Xi16, RegMRM>;
+ def NAME#32ri8 : BinOpRI8_F<0x82, mnemonic, Xi32, RegMRM>;
+ def NAME#64ri8 : BinOpRI8_F<0x82, mnemonic, Xi64, RegMRM>;
def NAME#16ri : BinOpRI_F<0x80, mnemonic, Xi16, opnode, RegMRM>;
def NAME#32ri : BinOpRI_F<0x80, mnemonic, Xi32, opnode, RegMRM>;
@@ -1077,24 +1049,26 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
// NOTE: These are order specific, we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
- def NAME#16mi8 : BinOpMI8_F<mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi8 : BinOpMI8_F<mnemonic, Xi32, opnode, MemMRM>;
- let Predicates = [In64BitMode] in
- def NAME#64mi8 : BinOpMI8_F<mnemonic, Xi64, opnode, MemMRM>;
-
- def NAME#8mi : BinOpMI_F<0x80, mnemonic, Xi8 , opnode, MemMRM>;
- def NAME#16mi : BinOpMI_F<0x80, mnemonic, Xi16, opnode, MemMRM>;
- def NAME#32mi : BinOpMI_F<0x80, mnemonic, Xi32, opnode, MemMRM>;
- let Predicates = [In64BitMode] in
- def NAME#64mi32 : BinOpMI_F<0x80, mnemonic, Xi64, opnode, MemMRM>;
+ let mayLoad = 1, hasSideEffects = 0 in {
+ def NAME#16mi8 : BinOpMI8_F<mnemonic, Xi16, MemMRM>;
+ def NAME#32mi8 : BinOpMI8_F<mnemonic, Xi32, MemMRM>;
+ let Predicates = [In64BitMode] in
+ def NAME#64mi8 : BinOpMI8_F<mnemonic, Xi64, MemMRM>;
+
+ def NAME#8mi : BinOpMI_F<0x80, mnemonic, Xi8 , opnode, MemMRM>;
+ def NAME#16mi : BinOpMI_F<0x80, mnemonic, Xi16, opnode, MemMRM>;
+ def NAME#32mi : BinOpMI_F<0x80, mnemonic, Xi32, opnode, MemMRM>;
+ let Predicates = [In64BitMode] in
+ def NAME#64mi32 : BinOpMI_F<0x80, mnemonic, Xi64, opnode, MemMRM>;
+ }
// These are for the disassembler since 0x82 opcode behaves like 0x80, but
// not in 64-bit mode.
let Predicates = [Not64BitMode], isCodeGenOnly = 1, ForceDisassemble = 1,
hasSideEffects = 0 in {
- def NAME#8ri8 : BinOpRI8_F<0x82, mnemonic, Xi8, null_frag, RegMRM>;
+ def NAME#8ri8 : BinOpRI8_F<0x82, mnemonic, Xi8, RegMRM>;
let mayLoad = 1 in
- def NAME#8mi8 : BinOpMI8_F<mnemonic, Xi8, null_frag, MemMRM>;
+ def NAME#8mi8 : BinOpMI8_F<mnemonic, Xi8, MemMRM>;
}
} // Defs = [EFLAGS]
@@ -1170,31 +1144,19 @@ def : Pat<(store (X86adc_flag GR64:$src, (loadi64 addr:$dst), EFLAGS),
multiclass ArithBinOp_RF_relocImm_Pats<SDNode OpNodeFlag, SDNode OpNode> {
def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2),
(!cast<Instruction>(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>;
- def : Pat<(OpNodeFlag GR16:$src1, i16relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"16ri8") GR16:$src1, i16relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2),
(!cast<Instruction>(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>;
- def : Pat<(OpNodeFlag GR32:$src1, i32relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"32ri8") GR32:$src1, i32relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2),
(!cast<Instruction>(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>;
- def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"64ri8") GR64:$src1, i64relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2),
(!cast<Instruction>(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>;
def : Pat<(store (OpNode (load addr:$dst), relocImm8_su:$src), addr:$dst),
(!cast<Instruction>(NAME#"8mi") addr:$dst, relocImm8_su:$src)>;
- def : Pat<(store (OpNode (load addr:$dst), i16relocImmSExt8_su:$src), addr:$dst),
- (!cast<Instruction>(NAME#"16mi8") addr:$dst, i16relocImmSExt8_su:$src)>;
def : Pat<(store (OpNode (load addr:$dst), relocImm16_su:$src), addr:$dst),
(!cast<Instruction>(NAME#"16mi") addr:$dst, relocImm16_su:$src)>;
- def : Pat<(store (OpNode (load addr:$dst), i32relocImmSExt8_su:$src), addr:$dst),
- (!cast<Instruction>(NAME#"32mi8") addr:$dst, i32relocImmSExt8_su:$src)>;
def : Pat<(store (OpNode (load addr:$dst), relocImm32_su:$src), addr:$dst),
(!cast<Instruction>(NAME#"32mi") addr:$dst, relocImm32_su:$src)>;
- def : Pat<(store (OpNode (load addr:$dst), i64relocImmSExt8_su:$src), addr:$dst),
- (!cast<Instruction>(NAME#"64mi8") addr:$dst, i64relocImmSExt8_su:$src)>;
def : Pat<(store (OpNode (load addr:$dst), i64relocImmSExt32_su:$src), addr:$dst),
(!cast<Instruction>(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>;
}
@@ -1202,31 +1164,19 @@ multiclass ArithBinOp_RF_relocImm_Pats<SDNode OpNodeFlag, SDNode OpNode> {
multiclass ArithBinOp_RFF_relocImm_Pats<SDNode OpNodeFlag> {
def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2, EFLAGS),
(!cast<Instruction>(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>;
- def : Pat<(OpNodeFlag GR16:$src1, i16relocImmSExt8_su:$src2, EFLAGS),
- (!cast<Instruction>(NAME#"16ri8") GR16:$src1, i16relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2, EFLAGS),
(!cast<Instruction>(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>;
- def : Pat<(OpNodeFlag GR32:$src1, i32relocImmSExt8_su:$src2, EFLAGS),
- (!cast<Instruction>(NAME#"32ri8") GR32:$src1, i32relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2, EFLAGS),
(!cast<Instruction>(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>;
- def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt8_su:$src2, EFLAGS),
- (!cast<Instruction>(NAME#"64ri8") GR64:$src1, i64relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2, EFLAGS),
(!cast<Instruction>(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>;
def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm8_su:$src, EFLAGS), addr:$dst),
(!cast<Instruction>(NAME#"8mi") addr:$dst, relocImm8_su:$src)>;
- def : Pat<(store (OpNodeFlag (load addr:$dst), i16relocImmSExt8_su:$src, EFLAGS), addr:$dst),
- (!cast<Instruction>(NAME#"16mi8") addr:$dst, i16relocImmSExt8_su:$src)>;
def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm16_su:$src, EFLAGS), addr:$dst),
(!cast<Instruction>(NAME#"16mi") addr:$dst, relocImm16_su:$src)>;
- def : Pat<(store (OpNodeFlag (load addr:$dst), i32relocImmSExt8_su:$src, EFLAGS), addr:$dst),
- (!cast<Instruction>(NAME#"32mi8") addr:$dst, i32relocImmSExt8_su:$src)>;
def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm32_su:$src, EFLAGS), addr:$dst),
(!cast<Instruction>(NAME#"32mi") addr:$dst, relocImm32_su:$src)>;
- def : Pat<(store (OpNodeFlag (load addr:$dst), i64relocImmSExt8_su:$src, EFLAGS), addr:$dst),
- (!cast<Instruction>(NAME#"64mi8") addr:$dst, i64relocImmSExt8_su:$src)>;
def : Pat<(store (OpNodeFlag (load addr:$dst), i64relocImmSExt32_su:$src, EFLAGS), addr:$dst),
(!cast<Instruction>(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>;
}
@@ -1234,31 +1184,19 @@ multiclass ArithBinOp_RFF_relocImm_Pats<SDNode OpNodeFlag> {
multiclass ArithBinOp_F_relocImm_Pats<SDNode OpNodeFlag> {
def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2),
(!cast<Instruction>(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>;
- def : Pat<(OpNodeFlag GR16:$src1, i16relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"16ri8") GR16:$src1, i16relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2),
(!cast<Instruction>(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>;
- def : Pat<(OpNodeFlag GR32:$src1, i32relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"32ri8") GR32:$src1, i32relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2),
(!cast<Instruction>(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>;
- def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"64ri8") GR64:$src1, i64relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2),
(!cast<Instruction>(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>;
def : Pat<(OpNodeFlag (loadi8 addr:$src1), relocImm8_su:$src2),
(!cast<Instruction>(NAME#"8mi") addr:$src1, relocImm8_su:$src2)>;
- def : Pat<(OpNodeFlag (loadi16 addr:$src1), i16relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"16mi8") addr:$src1, i16relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag (loadi16 addr:$src1), relocImm16_su:$src2),
(!cast<Instruction>(NAME#"16mi") addr:$src1, relocImm16_su:$src2)>;
- def : Pat<(OpNodeFlag (loadi32 addr:$src1), i32relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"32mi8") addr:$src1, i32relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag (loadi32 addr:$src1), relocImm32_su:$src2),
(!cast<Instruction>(NAME#"32mi") addr:$src1, relocImm32_su:$src2)>;
- def : Pat<(OpNodeFlag (loadi64 addr:$src1), i64relocImmSExt8_su:$src2),
- (!cast<Instruction>(NAME#"64mi8") addr:$src1, i64relocImmSExt8_su:$src2)>;
def : Pat<(OpNodeFlag (loadi64 addr:$src1), i64relocImmSExt32_su:$src2),
(!cast<Instruction>(NAME#"64mi32") addr:$src1, i64relocImmSExt32_su:$src2)>;
}
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 36324d133886..cd0cdf6cb002 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1225,12 +1225,12 @@ def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
-def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
-def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
-def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
-def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
-def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
-def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
+def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi addr:$dst, 0)>;
+def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi addr:$dst, 0)>;
+def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi32 addr:$dst, 0)>;
+def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi addr:$dst, -1)>;
+def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi addr:$dst, -1)>;
+def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi32 addr:$dst, -1)>;
}
// In kernel code model, we can get the address of a label
@@ -1526,35 +1526,16 @@ def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
[(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable
-// NOTE: These are order specific, we want the ri8 forms to be listed
-// first so that they are slightly preferred to the ri forms.
-
def ADD8ri_DB : I<0, Pseudo,
(outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"", // orb/addb REG, imm8
[(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
-def ADD16ri8_DB : I<0, Pseudo,
- (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
- "", // orw/addw REG, imm8
- [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"", // orw/addw REG, imm
[(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
-
-def ADD32ri8_DB : I<0, Pseudo,
- (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
- "", // orl/addl REG, imm8
- [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"", // orl/addl REG, imm
[(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
-
-
-def ADD64ri8_DB : I<0, Pseudo,
- (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
- "", // orq/addq REG, imm8
- [(set GR64:$dst, (or_is_add GR64:$src1,
- i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"", // orq/addq REG, imm
@@ -1585,26 +1566,26 @@ def : Pat<(xor GR32:$src1, -2147483648),
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
- (SUB16ri8 GR16:$src1, -128)>;
+ (SUB16ri GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
- (SUB16mi8 addr:$dst, -128)>;
+ (SUB16mi addr:$dst, -128)>;
def : Pat<(add GR32:$src1, 128),
- (SUB32ri8 GR32:$src1, -128)>;
+ (SUB32ri GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
- (SUB32mi8 addr:$dst, -128)>;
+ (SUB32mi addr:$dst, -128)>;
def : Pat<(add GR64:$src1, 128),
- (SUB64ri8 GR64:$src1, -128)>;
+ (SUB64ri32 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
- (SUB64mi8 addr:$dst, -128)>;
+ (SUB64mi32 addr:$dst, -128)>;
def : Pat<(X86add_flag_nocf GR16:$src1, 128),
- (SUB16ri8 GR16:$src1, -128)>;
+ (SUB16ri GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
- (SUB32ri8 GR32:$src1, -128)>;
+ (SUB32ri GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
- (SUB64ri8 GR64:$src1, -128)>;
+ (SUB64ri32 GR64:$src1, -128)>;
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
@@ -1612,7 +1593,6 @@ def : Pat<(add GR64:$src1, 0x0000000080000000),
(SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
(SUB64mi32 addr:$dst, 0xffffffff80000000)>;
-
def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
(SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
@@ -1625,14 +1605,6 @@ def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
-def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
- (SUBREG_TO_REG
- (i64 0),
- (AND32ri8
- (EXTRACT_SUBREG GR64:$src, sub_32bit),
- (i32 (GetLo32XForm imm:$imm))),
- sub_32bit)>;
-
def : Pat<(and GR64:$src, i64immZExt32:$imm),
(SUBREG_TO_REG
(i64 0),
@@ -2057,14 +2029,7 @@ def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(add GR16:$src1, i16immSExt8:$src2),
- (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(add GR32:$src1, i32immSExt8:$src2),
- (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(add GR64:$src1, i64immSExt8:$src2),
- (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(add GR64:$src1, i64immSExt32:$src2),
- (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
+def : Pat<(add GR64:$src1, i64immSExt32:$src2), (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
@@ -2089,12 +2054,6 @@ def : Pat<(sub GR16:$src1, imm:$src2),
(SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
(SUB32ri GR32:$src1, imm:$src2)>;
-def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
- (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
- (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
- (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
(SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
@@ -2190,12 +2149,6 @@ def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
-def : Pat<(or GR16:$src1, i16immSExt8:$src2),
- (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(or GR32:$src1, i32immSExt8:$src2),
- (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(or GR64:$src1, i64immSExt8:$src2),
- (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
(OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
@@ -2222,12 +2175,6 @@ def : Pat<(xor GR16:$src1, imm:$src2),
(XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
(XOR32ri GR32:$src1, imm:$src2)>;
-def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
- (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
- (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
- (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
(XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
@@ -2254,12 +2201,6 @@ def : Pat<(and GR16:$src1, imm:$src2),
(AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
(AND32ri GR32:$src1, imm:$src2)>;
-def : Pat<(and GR16:$src1, i16immSExt8:$src2),
- (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(and GR32:$src1, i32immSExt8:$src2),
- (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(and GR64:$src1, i64immSExt8:$src2),
- (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
(AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 1f66035fce76..2dff9ceeffde 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -1207,9 +1207,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
case X86::ADD8ri:
case X86::ADD8ri_DB:
case X86::ADD16ri:
- case X86::ADD16ri8:
case X86::ADD16ri_DB:
- case X86::ADD16ri8_DB:
addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
break;
case X86::ADD8rr:
@@ -1520,18 +1518,14 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
case X86::ADD16rr_DB:
return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
case X86::ADD64ri32:
- case X86::ADD64ri8:
case X86::ADD64ri32_DB:
- case X86::ADD64ri8_DB:
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addOffset(
BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
MI.getOperand(2));
break;
case X86::ADD32ri:
- case X86::ADD32ri8:
- case X86::ADD32ri_DB:
- case X86::ADD32ri8_DB: {
+ case X86::ADD32ri_DB: {
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
@@ -1559,16 +1553,12 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
Is8BitOp = true;
[[fallthrough]];
case X86::ADD16ri:
- case X86::ADD16ri8:
case X86::ADD16ri_DB:
- case X86::ADD16ri8_DB:
return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
case X86::SUB8ri:
- case X86::SUB16ri8:
case X86::SUB16ri:
/// FIXME: Support these similar to ADD8ri/ADD16ri*.
return nullptr;
- case X86::SUB32ri8:
case X86::SUB32ri: {
if (!MI.getOperand(2).isImm())
return nullptr;
@@ -1599,7 +1589,6 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
break;
}
- case X86::SUB64ri8:
case X86::SUB64ri32: {
if (!MI.getOperand(2).isImm())
return nullptr;
@@ -4040,11 +4029,8 @@ bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
switch (MI.getOpcode()) {
default: break;
case X86::CMP64ri32:
- case X86::CMP64ri8:
case X86::CMP32ri:
- case X86::CMP32ri8:
case X86::CMP16ri:
- case X86::CMP16ri8:
case X86::CMP8ri:
SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
@@ -4075,11 +4061,8 @@ bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
CmpValue = 0;
return true;
case X86::SUB64ri32:
- case X86::SUB64ri8:
case X86::SUB32ri:
- case X86::SUB32ri8:
case X86::SUB16ri:
- case X86::SUB16ri8:
case X86::SUB8ri:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
@@ -4147,18 +4130,12 @@ bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
return false;
}
case X86::CMP64ri32:
- case X86::CMP64ri8:
case X86::CMP32ri:
- case X86::CMP32ri8:
case X86::CMP16ri:
- case X86::CMP16ri8:
case X86::CMP8ri:
case X86::SUB64ri32:
- case X86::SUB64ri8:
case X86::SUB32ri:
- case X86::SUB32ri8:
case X86::SUB16ri:
- case X86::SUB16ri8:
case X86::SUB8ri:
case X86::TEST64rr:
case X86::TEST32rr:
@@ -4233,25 +4210,21 @@ inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
return getTruncatedShiftCount(MI, 3) != 0;
- case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
- case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
+ case X86::SUB64ri32: case X86::SUB32ri: case X86::SUB16ri:
case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
- case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
- case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
+ case X86::ADD64ri32: case X86::ADD32ri: case X86::ADD16ri:
case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
- case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
- case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8:
+ case X86::ADC64ri32: case X86::ADC32ri: case X86::ADC16ri:
case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr:
case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm:
case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm:
- case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
- case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8:
+ case X86::SBB64ri32: case X86::SBB32ri: case X86::SBB16ri:
case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr:
case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm:
case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm:
@@ -4266,18 +4239,15 @@ inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
case X86::TZCNT32rr: case X86::TZCNT32rm:
case X86::TZCNT64rr: case X86::TZCNT64rm:
return true;
- case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
- case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
+ case X86::AND64ri32: case X86::AND32ri: case X86::AND16ri:
case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
case X86::AND16rr: case X86::AND8rr: case X86::AND64rm:
case X86::AND32rm: case X86::AND16rm: case X86::AND8rm:
- case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
- case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8:
+ case X86::XOR64ri32: case X86::XOR32ri: case X86::XOR16ri:
case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr:
case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm:
case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm:
- case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri:
- case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8:
+ case X86::OR64ri32: case X86::OR32ri: case X86::OR16ri:
case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
@@ -4376,11 +4346,8 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
switch (CmpInstr.getOpcode()) {
default: break;
case X86::SUB64ri32:
- case X86::SUB64ri8:
case X86::SUB32ri:
- case X86::SUB32ri8:
case X86::SUB16ri:
- case X86::SUB16ri8:
case X86::SUB8ri:
case X86::SUB64rm:
case X86::SUB32rm:
@@ -4405,11 +4372,8 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
- case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
- case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
- case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
}
CmpInstr.setDesc(get(NewOpcode));
@@ -5190,9 +5154,6 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break;
case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break;
case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
- case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break;
- case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break;
- case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break;
}
return false;
}
@@ -6411,9 +6372,9 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; RCSize = 2; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; RCSize = 4; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; RCSize = 8; break;
}
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -6790,9 +6751,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
}
// Change to CMPXXri r, 0 first.
MI.setDesc(get(NewOpc));
@@ -7088,11 +7049,8 @@ bool X86InstrInfo::unfoldMemoryOperand(
switch (DataMI->getOpcode()) {
default: break;
case X86::CMP64ri32:
- case X86::CMP64ri8:
case X86::CMP32ri:
- case X86::CMP32ri8:
case X86::CMP16ri:
- case X86::CMP16ri8:
case X86::CMP8ri: {
MachineOperand &MO0 = DataMI->getOperand(0);
MachineOperand &MO1 = DataMI->getOperand(1);
@@ -7100,11 +7058,8 @@ bool X86InstrInfo::unfoldMemoryOperand(
unsigned NewOpc;
switch (DataMI->getOpcode()) {
default: llvm_unreachable("Unreachable!");
- case X86::CMP64ri8:
case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
- case X86::CMP32ri8:
case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
- case X86::CMP16ri8:
case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
}
@@ -7217,20 +7172,14 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
switch (Opc) {
default: break;
case X86::CMP64ri32:
- case X86::CMP64ri8:
case X86::CMP32ri:
- case X86::CMP32ri8:
case X86::CMP16ri:
- case X86::CMP16ri8:
case X86::CMP8ri:
if (isNullConstant(BeforeOps[1])) {
switch (Opc) {
default: llvm_unreachable("Unreachable!");
- case X86::CMP64ri8:
case X86::CMP64ri32: Opc = X86::TEST64rr; break;
- case X86::CMP32ri8:
case X86::CMP32ri: Opc = X86::TEST32rr; break;
- case X86::CMP16ri8:
case X86::CMP16ri: Opc = X86::TEST16rr; break;
case X86::CMP8ri: Opc = X86::TEST8rr; break;
}
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index d7b2d0a61fbc..04d0e4afa094 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -838,11 +838,11 @@ bool X86InstructionSelector::selectZext(MachineInstr &I,
if (DstTy == LLT::scalar(8))
AndOpc = X86::AND8ri;
else if (DstTy == LLT::scalar(16))
- AndOpc = X86::AND16ri8;
+ AndOpc = X86::AND16ri;
else if (DstTy == LLT::scalar(32))
- AndOpc = X86::AND32ri8;
+ AndOpc = X86::AND32ri;
else if (DstTy == LLT::scalar(64))
- AndOpc = X86::AND64ri8;
+ AndOpc = X86::AND64ri32;
else
return false;
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 9194f4485eb9..b60916eaeeae 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -405,7 +405,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
X86::optimizeVPCMPWithImmediateOneOrSix(OutMI) ||
X86::optimizeMOVSX(OutMI) || X86::optimizeINCDEC(OutMI, In64BitMode) ||
X86::optimizeMOV(OutMI, In64BitMode) ||
- X86::optimizeToFixedRegisterForm(OutMI))
+ X86::optimizeToFixedRegisterOrShortImmediateForm(OutMI))
return;
// Handle a few special cases to eliminate operand modifiers.
diff --git a/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir b/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
index 610bbea17e2f..c3d1836e8237 100644
--- a/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
+++ b/llvm/test/CodeGen/MIR/X86/branch-folder-with-label.mir
@@ -355,7 +355,7 @@ body: |
bb.5.if.then:
liveins: $eax
- $rsp = frame-destroy ADD64ri8 $rsp, 8, implicit-def dead $eflags
+ $rsp = frame-destroy ADD64ri32 $rsp, 8, implicit-def dead $eflags
CFI_INSTRUCTION def_cfa_offset 24
$rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
CFI_INSTRUCTION def_cfa_offset 16
@@ -367,7 +367,7 @@ body: |
successors: %bb.8(0x30000000), %bb.7(0x50000000)
liveins: $rbx, $r14
- CMP32mi8 $rsp, 1, $noreg, 4, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from %ir.idx)
+ CMP32mi $rsp, 1, $noreg, 4, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from %ir.idx)
JCC_1 %bb.8, 8, implicit killed $eflags
JMP_1 %bb.7
@@ -375,7 +375,7 @@ body: |
successors: %bb.8(0x30000000), %bb.3(0x50000000)
liveins: $rbx, $r14
- CMP32mi8 renamable $rbx, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load (s32) from %ir.1)
+ CMP32mi renamable $rbx, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load (s32) from %ir.1)
JCC_1 %bb.3, 5, implicit killed $eflags
JMP_1 %bb.8
diff --git a/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll b/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll
index a1c8fafaad3e..87f1a3fdd33e 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-greedy-ra-spill-shape.ll
@@ -33,10 +33,10 @@ define void @foo(i32 %M, i32 %N, i32 %K, ptr %A, ptr %B_rcr4, ptr %C, i32 %c_row
; CHECK-NEXT: [[MOV32rm2:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.3, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.3, align 16)
; CHECK-NEXT: MOV8mr %stack.0, 1, $noreg, 49, $noreg, [[MOV32rm2]].sub_8bit :: (store (s512) into %stack.0 + 49, align 1, basealign 4)
; CHECK-NEXT: MOV8mr %stack.0, 1, $noreg, 48, $noreg, [[MOV32rm2]].sub_8bit :: (store (s512) into %stack.0 + 48, align 4)
- ; CHECK-NEXT: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[AND32ri8_]], -64, implicit-def dead $eflags
+ ; CHECK-NEXT: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[AND32ri_]], -64, implicit-def dead $eflags
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY83]].sub_32bit
; CHECK-NEXT: MOV16mr %stack.0, 1, $noreg, 18, $noreg, [[COPY2]].sub_16bit :: (store (s512) into %stack.0 + 18, align 2, basealign 4)
- ; CHECK-NEXT: [[SUB32rr:%[0-9]+]]:gr32 = SUB32rr [[SUB32rr]], [[AND32ri8_]], implicit-def dead $eflags
+ ; CHECK-NEXT: [[SUB32rr:%[0-9]+]]:gr32 = SUB32rr [[SUB32rr]], [[AND32ri_]], implicit-def dead $eflags
; CHECK-NEXT: MOV16mr %stack.0, 1, $noreg, 18, $noreg, [[SUB32rr]].sub_16bit :: (store (s512) into %stack.0 + 18, align 2, basealign 4)
; CHECK-NEXT: [[MOVZX32rr16_:%[0-9]+]]:gr32 = MOVZX32rr16 [[SUB32rr]].sub_16bit
; CHECK-NEXT: MOV8mr %stack.0, 1, $noreg, 50, $noreg, [[MOVZX32rr16_]].sub_8bit :: (store (s512) into %stack.0 + 50, align 2, basealign 4)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir b/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir
index 307eadf677ab..010665faddb3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-blsi.mir
@@ -58,8 +58,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK-NEXT: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
- ; CHECK-NEXT: [[SUB32ri8_:%[0-9]+]]:gr32 = SUB32ri8 [[MOV32r0_]], 0, implicit-def $eflags
- ; CHECK-NEXT: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri8_]], [[COPY]], implicit-def $eflags
+ ; CHECK-NEXT: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[MOV32r0_]], 0, implicit-def $eflags
+ ; CHECK-NEXT: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri]], [[COPY]], implicit-def $eflags
; CHECK-NEXT: $edi = COPY [[AND32rr]]
%0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir b/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir
index 929b0bf64733..9eb96722a4ae 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-cmp.mir
@@ -104,8 +104,8 @@ body: |
; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s8) = COPY $dil
%1(s8) = COPY $sil
@@ -140,8 +140,8 @@ body: |
; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s16) = COPY $di
%1(s16) = COPY $si
@@ -176,8 +176,8 @@ body: |
; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s64) = COPY $rdi
%1(s64) = COPY $rsi
@@ -212,8 +212,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -248,8 +248,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -284,8 +284,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -320,8 +320,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -356,8 +356,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -392,8 +392,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -428,8 +428,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -464,8 +464,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 13, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -500,8 +500,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 12, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
@@ -536,8 +536,8 @@ body: |
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 14, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[MOVZX32rr8_]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir b/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
index 3c265c537364..60171e5bee1f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
@@ -40,8 +40,8 @@ body: |
; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
; ALL: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF
; ALL: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_8bit
- ; ALL: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
- ; ALL: $rax = COPY [[AND64ri8_]]
+ ; ALL: [[AND64ri32_:%[0-9]+]]:gr64 = AND64ri32 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; ALL: $rax = COPY [[AND64ri32_]]
; ALL: RET 0, implicit $rax
%0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir b/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir
index 5bb95b9bfaf6..44daf22b00a3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-ext.mir
@@ -98,16 +98,16 @@ body: |
; X86-NEXT: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; X86-NEXT: [[DEF:%[0-9]+]]:gr16 = IMPLICIT_DEF
; X86-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gr16 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.sub_8bit
- ; X86-NEXT: [[AND16ri8_:%[0-9]+]]:gr16 = AND16ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
- ; X86-NEXT: $ax = COPY [[AND16ri8_]]
+ ; X86-NEXT: [[AND16ri_:%[0-9]+]]:gr16 = AND16ri [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; X86-NEXT: $ax = COPY [[AND16ri_]]
; X86-NEXT: RET 0, implicit $ax
; X64-LABEL: name: test_zext_i1toi16
; X64: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; X64-NEXT: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; X64-NEXT: [[DEF:%[0-9]+]]:gr16 = IMPLICIT_DEF
; X64-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gr16 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.sub_8bit
- ; X64-NEXT: [[AND16ri8_:%[0-9]+]]:gr16 = AND16ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
- ; X64-NEXT: $ax = COPY [[AND16ri8_]]
+ ; X64-NEXT: [[AND16ri_:%[0-9]+]]:gr16 = AND16ri [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; X64-NEXT: $ax = COPY [[AND16ri_]]
; X64-NEXT: RET 0, implicit $ax
%0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
@@ -135,16 +135,16 @@ body: |
; X86-NEXT: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; X86-NEXT: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
; X86-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.sub_8bit
- ; X86-NEXT: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
- ; X86-NEXT: $eax = COPY [[AND32ri8_]]
+ ; X86-NEXT: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; X86-NEXT: $eax = COPY [[AND32ri_]]
; X86-NEXT: RET 0, implicit $eax
; X64-LABEL: name: test_zext_i1
; X64: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; X64-NEXT: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; X64-NEXT: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
; X64-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.sub_8bit
- ; X64-NEXT: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
- ; X64-NEXT: $eax = COPY [[AND32ri8_]]
+ ; X64-NEXT: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; X64-NEXT: $eax = COPY [[AND32ri_]]
; X64-NEXT: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir
index c6a7a03b606c..2083b10f05b1 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir
@@ -105,8 +105,8 @@ body: |
; CHECK: liveins: $edi
; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; CHECK: [[AND16ri8_:%[0-9]+]]:gr16 = AND16ri8 [[COPY1]], 1, implicit-def $eflags
- ; CHECK: $ax = COPY [[AND16ri8_]]
+ ; CHECK: [[AND16ri_:%[0-9]+]]:gr16 = AND16ri [[COPY1]], 1, implicit-def $eflags
+ ; CHECK: $ax = COPY [[AND16ri_]]
; CHECK: RET 0, implicit $ax
%1:gpr(s32) = COPY $edi
%3:gpr(s16) = G_CONSTANT i16 1
@@ -135,8 +135,8 @@ body: |
; CHECK-LABEL: name: zext_i1_to_i32
; CHECK: liveins: $edi
; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[COPY]], 1, implicit-def $eflags
- ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: [[AND32ri_:%[0-9]+]]:gr32 = AND32ri [[COPY]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri_]]
; CHECK: RET 0, implicit $eax
%1:gpr(s32) = COPY $edi
%3:gpr(s32) = G_CONSTANT i32 1
@@ -167,8 +167,8 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32bit
- ; CHECK: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
- ; CHECK: $rax = COPY [[AND64ri8_]]
+ ; CHECK: [[AND64ri32_:%[0-9]+]]:gr64 = AND64ri32 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: $rax = COPY [[AND64ri32_]]
; CHECK: RET 0, implicit $rax
%1:gpr(s32) = COPY $edi
%3:gpr(s64) = G_CONSTANT i64 1
diff --git a/llvm/test/CodeGen/X86/avxvnni-combine.ll b/llvm/test/CodeGen/X86/avxvnni-combine.ll
index edd27bb63c70..82c24594453b 100644
--- a/llvm/test/CodeGen/X86/avxvnni-combine.ll
+++ b/llvm/test/CodeGen/X86/avxvnni-combine.ll
@@ -42,99 +42,52 @@ define <2 x i64> @foo_reg_128(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2, <2 x i64
declare <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x i32>) #1
define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
-; ADL-LABEL: foo_128:
-; ADL: # %bb.0:
-; ADL-NEXT: testl %edi, %edi
-; ADL-NEXT: jle .LBB1_6
-; ADL-NEXT: # %bb.1:
-; ADL-NEXT: movl %edi, %edx
-; ADL-NEXT: movl %edx, %eax
-; ADL-NEXT: andl $3, %eax
-; ADL-NEXT: cmpl $4, %edi
-; ADL-NEXT: jae .LBB1_7
-; ADL-NEXT: # %bb.2:
-; ADL-NEXT: xorl %ecx, %ecx
-; ADL-NEXT: jmp .LBB1_3
-; ADL-NEXT: .LBB1_7:
-; ADL-NEXT: andl $-4, %edx
-; ADL-NEXT: leaq 48(%rsi), %rdi
-; ADL-NEXT: xorl %ecx, %ecx
-; ADL-NEXT: .p2align 4, 0x90
-; ADL-NEXT: .LBB1_8: # =>This Inner Loop Header: Depth=1
-; ADL-NEXT: {vex} vpdpwssd -48(%rdi), %xmm1, %xmm0
-; ADL-NEXT: vpmaddwd -32(%rdi), %xmm1, %xmm2
-; ADL-NEXT: vpmaddwd -16(%rdi), %xmm1, %xmm3
-; ADL-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; ADL-NEXT: vpaddd %xmm3, %xmm0, %xmm0
-; ADL-NEXT: vpmaddwd (%rdi), %xmm1, %xmm2
-; ADL-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; ADL-NEXT: addq $4, %rcx
-; ADL-NEXT: addq $64, %rdi
-; ADL-NEXT: cmpq %rcx, %rdx
-; ADL-NEXT: jne .LBB1_8
-; ADL-NEXT: .LBB1_3:
-; ADL-NEXT: testq %rax, %rax
-; ADL-NEXT: je .LBB1_6
-; ADL-NEXT: # %bb.4: # %.preheader
-; ADL-NEXT: shlq $4, %rcx
-; ADL-NEXT: addq %rcx, %rsi
-; ADL-NEXT: shlq $4, %rax
-; ADL-NEXT: xorl %ecx, %ecx
-; ADL-NEXT: .p2align 4, 0x90
-; ADL-NEXT: .LBB1_5: # =>This Inner Loop Header: Depth=1
-; ADL-NEXT: {vex} vpdpwssd (%rsi,%rcx), %xmm1, %xmm0
-; ADL-NEXT: addq $16, %rcx
-; ADL-NEXT: cmpq %rcx, %rax
-; ADL-NEXT: jne .LBB1_5
-; ADL-NEXT: .LBB1_6:
-; ADL-NEXT: retq
-;
-; SPR-LABEL: foo_128:
-; SPR: # %bb.0:
-; SPR-NEXT: testl %edi, %edi
-; SPR-NEXT: jle .LBB1_6
-; SPR-NEXT: # %bb.1:
-; SPR-NEXT: movl %edi, %edx
-; SPR-NEXT: movl %edx, %eax
-; SPR-NEXT: andl $3, %eax
-; SPR-NEXT: cmpl $4, %edi
-; SPR-NEXT: jae .LBB1_7
-; SPR-NEXT: # %bb.2:
-; SPR-NEXT: xorl %ecx, %ecx
-; SPR-NEXT: jmp .LBB1_3
-; SPR-NEXT: .LBB1_7:
-; SPR-NEXT: andl $-4, %edx
-; SPR-NEXT: leaq 48(%rsi), %rdi
-; SPR-NEXT: xorl %ecx, %ecx
-; SPR-NEXT: .p2align 4, 0x90
-; SPR-NEXT: .LBB1_8: # =>This Inner Loop Header: Depth=1
-; SPR-NEXT: {vex} vpdpwssd -48(%rdi), %xmm1, %xmm0
-; SPR-NEXT: vpmaddwd -32(%rdi), %xmm1, %xmm2
-; SPR-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; SPR-NEXT: vpmaddwd -16(%rdi), %xmm1, %xmm2
-; SPR-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; SPR-NEXT: vpmaddwd (%rdi), %xmm1, %xmm2
-; SPR-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; SPR-NEXT: addq $4, %rcx
-; SPR-NEXT: addq $64, %rdi
-; SPR-NEXT: cmpq %rcx, %rdx
-; SPR-NEXT: jne .LBB1_8
-; SPR-NEXT: .LBB1_3:
-; SPR-NEXT: testq %rax, %rax
-; SPR-NEXT: je .LBB1_6
-; SPR-NEXT: # %bb.4: # %.preheader
-; SPR-NEXT: shlq $4, %rcx
-; SPR-NEXT: addq %rcx, %rsi
-; SPR-NEXT: shlq $4, %rax
-; SPR-NEXT: xorl %ecx, %ecx
-; SPR-NEXT: .p2align 4, 0x90
-; SPR-NEXT: .LBB1_5: # =>This Inner Loop Header: Depth=1
-; SPR-NEXT: {vex} vpdpwssd (%rsi,%rcx), %xmm1, %xmm0
-; SPR-NEXT: addq $16, %rcx
-; SPR-NEXT: cmpq %rcx, %rax
-; SPR-NEXT: jne .LBB1_5
-; SPR-NEXT: .LBB1_6:
-; SPR-NEXT: retq
+; AVX-LABEL: foo_128:
+; AVX: # %bb.0:
+; AVX-NEXT: testl %edi, %edi
+; AVX-NEXT: jle .LBB1_6
+; AVX-NEXT: # %bb.1:
+; AVX-NEXT: movl %edi, %edx
+; AVX-NEXT: movl %edx, %eax
+; AVX-NEXT: andl $3, %eax
+; AVX-NEXT: cmpl $4, %edi
+; AVX-NEXT: jae .LBB1_7
+; AVX-NEXT: # %bb.2:
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: jmp .LBB1_3
+; AVX-NEXT: .LBB1_7:
+; AVX-NEXT: andl $-4, %edx
+; AVX-NEXT: leaq 48(%rsi), %rdi
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: .p2align 4, 0x90
+; AVX-NEXT: .LBB1_8: # =>This Inner Loop Header: Depth=1
+; AVX-NEXT: {vex} vpdpwssd -48(%rdi), %xmm1, %xmm0
+; AVX-NEXT: vpmaddwd -32(%rdi), %xmm1, %xmm2
+; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpmaddwd -16(%rdi), %xmm1, %xmm2
+; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpmaddwd (%rdi), %xmm1, %xmm2
+; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX-NEXT: addq $4, %rcx
+; AVX-NEXT: addq $64, %rdi
+; AVX-NEXT: cmpq %rcx, %rdx
+; AVX-NEXT: jne .LBB1_8
+; AVX-NEXT: .LBB1_3:
+; AVX-NEXT: testq %rax, %rax
+; AVX-NEXT: je .LBB1_6
+; AVX-NEXT: # %bb.4: # %.preheader
+; AVX-NEXT: shlq $4, %rcx
+; AVX-NEXT: addq %rcx, %rsi
+; AVX-NEXT: shlq $4, %rax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: .p2align 4, 0x90
+; AVX-NEXT: .LBB1_5: # =>This Inner Loop Header: Depth=1
+; AVX-NEXT: {vex} vpdpwssd (%rsi,%rcx), %xmm1, %xmm0
+; AVX-NEXT: addq $16, %rcx
+; AVX-NEXT: cmpq %rcx, %rax
+; AVX-NEXT: jne .LBB1_5
+; AVX-NEXT: .LBB1_6:
+; AVX-NEXT: retq
;
; AVX512-LABEL: foo_128:
; AVX512: # %bb.0:
@@ -476,99 +429,52 @@ define <4 x i64> @foo_reg_256(<4 x i64> %0, <4 x i64> %1, <4 x i64> %2, <4 x i64
; }
define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
-; ADL-LABEL: foo_256:
-; ADL: # %bb.0:
-; ADL-NEXT: testl %edi, %edi
-; ADL-NEXT: jle .LBB4_6
-; ADL-NEXT: # %bb.1:
-; ADL-NEXT: movl %edi, %edx
-; ADL-NEXT: movl %edx, %eax
-; ADL-NEXT: andl $3, %eax
-; ADL-NEXT: cmpl $4, %edi
-; ADL-NEXT: jae .LBB4_7
-; ADL-NEXT: # %bb.2:
-; ADL-NEXT: xorl %ecx, %ecx
-; ADL-NEXT: jmp .LBB4_3
-; ADL-NEXT: .LBB4_7:
-; ADL-NEXT: andl $-4, %edx
-; ADL-NEXT: leaq 96(%rsi), %rdi
-; ADL-NEXT: xorl %ecx, %ecx
-; ADL-NEXT: .p2align 4, 0x90
-; ADL-NEXT: .LBB4_8: # =>This Inner Loop Header: Depth=1
-; ADL-NEXT: {vex} vpdpwssd -96(%rdi), %ymm1, %ymm0
-; ADL-NEXT: vpmaddwd -64(%rdi), %ymm1, %ymm2
-; ADL-NEXT: vpmaddwd -32(%rdi), %ymm1, %ymm3
-; ADL-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; ADL-NEXT: vpaddd %ymm3, %ymm0, %ymm0
-; ADL-NEXT: vpmaddwd (%rdi), %ymm1, %ymm2
-; ADL-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; ADL-NEXT: addq $4, %rcx
-; ADL-NEXT: subq $-128, %rdi
-; ADL-NEXT: cmpq %rcx, %rdx
-; ADL-NEXT: jne .LBB4_8
-; ADL-NEXT: .LBB4_3:
-; ADL-NEXT: testq %rax, %rax
-; ADL-NEXT: je .LBB4_6
-; ADL-NEXT: # %bb.4: # %.preheader
-; ADL-NEXT: shlq $5, %rcx
-; ADL-NEXT: addq %rcx, %rsi
-; ADL-NEXT: shlq $5, %rax
-; ADL-NEXT: xorl %ecx, %ecx
-; ADL-NEXT: .p2align 4, 0x90
-; ADL-NEXT: .LBB4_5: # =>This Inner Loop Header: Depth=1
-; ADL-NEXT: {vex} vpdpwssd (%rsi,%rcx), %ymm1, %ymm0
-; ADL-NEXT: addq $32, %rcx
-; ADL-NEXT: cmpq %rcx, %rax
-; ADL-NEXT: jne .LBB4_5
-; ADL-NEXT: .LBB4_6:
-; ADL-NEXT: retq
-;
-; SPR-LABEL: foo_256:
-; SPR: # %bb.0:
-; SPR-NEXT: testl %edi, %edi
-; SPR-NEXT: jle .LBB4_6
-; SPR-NEXT: # %bb.1:
-; SPR-NEXT: movl %edi, %edx
-; SPR-NEXT: movl %edx, %eax
-; SPR-NEXT: andl $3, %eax
-; SPR-NEXT: cmpl $4, %edi
-; SPR-NEXT: jae .LBB4_7
-; SPR-NEXT: # %bb.2:
-; SPR-NEXT: xorl %ecx, %ecx
-; SPR-NEXT: jmp .LBB4_3
-; SPR-NEXT: .LBB4_7:
-; SPR-NEXT: andl $-4, %edx
-; SPR-NEXT: leaq 96(%rsi), %rdi
-; SPR-NEXT: xorl %ecx, %ecx
-; SPR-NEXT: .p2align 4, 0x90
-; SPR-NEXT: .LBB4_8: # =>This Inner Loop Header: Depth=1
-; SPR-NEXT: {vex} vpdpwssd -96(%rdi), %ymm1, %ymm0
-; SPR-NEXT: vpmaddwd -64(%rdi), %ymm1, %ymm2
-; SPR-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; SPR-NEXT: vpmaddwd -32(%rdi), %ymm1, %ymm2
-; SPR-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; SPR-NEXT: vpmaddwd (%rdi), %ymm1, %ymm2
-; SPR-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; SPR-NEXT: addq $4, %rcx
-; SPR-NEXT: subq $-128, %rdi
-; SPR-NEXT: cmpq %rcx, %rdx
-; SPR-NEXT: jne .LBB4_8
-; SPR-NEXT: .LBB4_3:
-; SPR-NEXT: testq %rax, %rax
-; SPR-NEXT: je .LBB4_6
-; SPR-NEXT: # %bb.4: # %.preheader
-; SPR-NEXT: shlq $5, %rcx
-; SPR-NEXT: addq %rcx, %rsi
-; SPR-NEXT: shlq $5, %rax
-; SPR-NEXT: xorl %ecx, %ecx
-; SPR-NEXT: .p2align 4, 0x90
-; SPR-NEXT: .LBB4_5: # =>This Inner Loop Header: Depth=1
-; SPR-NEXT: {vex} vpdpwssd (%rsi,%rcx), %ymm1, %ymm0
-; SPR-NEXT: addq $32, %rcx
-; SPR-NEXT: cmpq %rcx, %rax
-; SPR-NEXT: jne .LBB4_5
-; SPR-NEXT: .LBB4_6:
-; SPR-NEXT: retq
+; AVX-LABEL: foo_256:
+; AVX: # %bb.0:
+; AVX-NEXT: testl %edi, %edi
+; AVX-NEXT: jle .LBB4_6
+; AVX-NEXT: # %bb.1:
+; AVX-NEXT: movl %edi, %edx
+; AVX-NEXT: movl %edx, %eax
+; AVX-NEXT: andl $3, %eax
+; AVX-NEXT: cmpl $4, %edi
+; AVX-NEXT: jae .LBB4_7
+; AVX-NEXT: # %bb.2:
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: jmp .LBB4_3
+; AVX-NEXT: .LBB4_7:
+; AVX-NEXT: andl $-4, %edx
+; AVX-NEXT: leaq 96(%rsi), %rdi
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: .p2align 4, 0x90
+; AVX-NEXT: .LBB4_8: # =>This Inner Loop Header: Depth=1
+; AVX-NEXT: {vex} vpdpwssd -96(%rdi), %ymm1, %ymm0
+; AVX-NEXT: vpmaddwd -64(%rdi), %ymm1, %ymm2
+; AVX-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vpmaddwd -32(%rdi), %ymm1, %ymm2
+; AVX-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX-NEXT: vpmaddwd (%rdi), %ymm1, %ymm2
+; AVX-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX-NEXT: addq $4, %rcx
+; AVX-NEXT: subq $-128, %rdi
+; AVX-NEXT: cmpq %rcx, %rdx
+; AVX-NEXT: jne .LBB4_8
+; AVX-NEXT: .LBB4_3:
+; AVX-NEXT: testq %rax, %rax
+; AVX-NEXT: je .LBB4_6
+; AVX-NEXT: # %bb.4: # %.preheader
+; AVX-NEXT: shlq $5, %rcx
+; AVX-NEXT: addq %rcx, %rsi
+; AVX-NEXT: shlq $5, %rax
+; AVX-NEXT: xorl %ecx, %ecx
+; AVX-NEXT: .p2align 4, 0x90
+; AVX-NEXT: .LBB4_5: # =>This Inner Loop Header: Depth=1
+; AVX-NEXT: {vex} vpdpwssd (%rsi,%rcx), %ymm1, %ymm0
+; AVX-NEXT: addq $32, %rcx
+; AVX-NEXT: cmpq %rcx, %rax
+; AVX-NEXT: jne .LBB4_5
+; AVX-NEXT: .LBB4_6:
+; AVX-NEXT: retq
;
; AVX512-LABEL: foo_256:
; AVX512: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/cfi-xmm.ll b/llvm/test/CodeGen/X86/cfi-xmm.ll
index 36cf95fc28c2..76c59ffdf942 100644
--- a/llvm/test/CodeGen/X86/cfi-xmm.ll
+++ b/llvm/test/CodeGen/X86/cfi-xmm.ll
@@ -22,7 +22,7 @@ entry:
; CHECK: .cfi_endproc
; PEI-LABEL: name: _Z1fv
-; PEI: $rsp = frame-setup SUB64ri8 $rsp, 40, implicit-def dead $eflags
+; PEI: $rsp = frame-setup SUB64ri32 $rsp, 40, implicit-def dead $eflags
; PEI-NEXT: frame-setup MOVAPSmr $rsp, 1, $noreg, 16, $noreg, killed $xmm15 :: (store (s128) into %fixed-stack.1)
; PEI-NEXT: frame-setup MOVAPSmr $rsp, 1, $noreg, 0, $noreg, killed $xmm10 :: (store (s128) into %fixed-stack.0)
; PEI-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 48
@@ -31,5 +31,5 @@ entry:
; PEI-NEXT: INLINEASM {{.*}}
; PEI-NEXT: $xmm10 = MOVAPSrm $rsp, 1, $noreg, 0, $noreg :: (load (s128) from %fixed-stack.0)
; PEI-NEXT: $xmm15 = MOVAPSrm $rsp, 1, $noreg, 16, $noreg :: (load (s128) from %fixed-stack.1)
-; PEI-NEXT: $rsp = frame-destroy ADD64ri8 $rsp, 40, implicit-def dead $eflags
+; PEI-NEXT: $rsp = frame-destroy ADD64ri32 $rsp, 40, implicit-def dead $eflags
; PEI-NEXT: RET 0
diff --git a/llvm/test/CodeGen/X86/extend-set-cc-uses-dbg.ll b/llvm/test/CodeGen/X86/extend-set-cc-uses-dbg.ll
index ba4be8361bb9..88402c464abe 100644
--- a/llvm/test/CodeGen/X86/extend-set-cc-uses-dbg.ll
+++ b/llvm/test/CodeGen/X86/extend-set-cc-uses-dbg.ll
@@ -9,7 +9,7 @@ bb:
; CHECK: $eax = MOV32rm killed {{.*}} $rdi, {{.*}} debug-location !7 :: (load (s32) from %ir.p)
; CHECK-NEXT: $rax = KILL killed renamable $eax, debug-location !7
; CHECK-NEXT: MOV64mr $rsp, 1, $noreg, -8, $noreg, $rax :: (store (s64) into %stack.0)
- ; CHECK-NEXT: SUB64ri8 renamable $rax, 3, implicit-def $eflags, debug-location !7
+ ; CHECK-NEXT: SUB64ri32 renamable $rax, 3, implicit-def $eflags, debug-location !7
switch i32 %tmp, label %bb7 [
i32 0, label %bb1
diff --git a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
index 38610c92e970..56cbe3f7b563 100644
--- a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
+++ b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
@@ -139,7 +139,7 @@ body: |
; CHECK: $rbp = frame-setup MOV64rr $rsp
; CHECK: CFI_INSTRUCTION def_cfa_register $rbp
; CHECK: frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp, debug-location !13
- ; CHECK: $rsp = frame-setup SUB64ri8 $rsp, 40, implicit-def dead $eflags
+ ; CHECK: $rsp = frame-setup SUB64ri32 $rsp, 40, implicit-def dead $eflags
; CHECK: CFI_INSTRUCTION offset $rbx, -24
; CHECK: renamable $eax = MOV32rm $rbp, 1, $noreg, -12, $noreg, debug-location !13 :: (dereferenceable load (s32) from %ir.a.addr)
; CHECK: renamable $rax = KILL killed renamable $eax, debug-location !13
diff --git a/llvm/test/CodeGen/X86/leaFixup32.mir b/llvm/test/CodeGen/X86/leaFixup32.mir
index d1de6793f0f7..67339715e7a2 100644
--- a/llvm/test/CodeGen/X86/leaFixup32.mir
+++ b/llvm/test/CodeGen/X86/leaFixup32.mir
@@ -8,7 +8,7 @@
;generated using: llc -stop-after x86-pad-short-functions fixup-lea.ll > leaFinxup32.mir
;test2add_32: 3 operands LEA32r that can be replaced with 2 add instructions
- ; where ADD32ri8 is chosen
+ ; where ADD32ri is chosen
define i32 @test2add_32() {
ret i32 0
}
@@ -109,7 +109,7 @@ body: |
; CHECK-LABEL: name: test2add_32
; CHECK: liveins: $eax, $ebp
; CHECK: $eax = ADD32rr $eax, $ebp, implicit-def $eflags
- ; CHECK: $eax = ADD32ri8 $eax, -5, implicit-def $eflags
+ ; CHECK: $eax = ADD32ri $eax, -5, implicit-def $eflags
; CHECK: RET64 $eax
$eax = LEA32r killed $eax, 1, killed $ebp, -5, $noreg
RET64 $eax
@@ -147,7 +147,7 @@ body: |
; CHECK-LABEL: name: test2add_ebp_32
; CHECK: liveins: $eax, $ebp
; CHECK: $ebp = ADD32rr $ebp, $eax, implicit-def $eflags
- ; CHECK: $ebp = ADD32ri8 $ebp, -5, implicit-def $eflags
+ ; CHECK: $ebp = ADD32ri $ebp, -5, implicit-def $eflags
; CHECK: RET64 $ebp
$ebp = LEA32r killed $ebp, 1, killed $eax, -5, $noreg
RET64 $ebp
@@ -223,7 +223,7 @@ body: |
; CHECK-LABEL: name: testleaadd_32
; CHECK: liveins: $eax, $ebp, $esi
; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0, $noreg
- ; CHECK: $ebx = ADD32ri8 $ebx, -5, implicit-def $eflags
+ ; CHECK: $ebx = ADD32ri $ebx, -5, implicit-def $eflags
; CHECK: RET64 $ebx
$ebx = LEA32r killed $eax, 1, killed $ebp, -5, $noreg
RET64 $ebx
@@ -262,7 +262,7 @@ body: |
; CHECK-LABEL: name: testleaadd_ebp_32
; CHECK: liveins: $eax, $ebp
; CHECK: $ebx = LEA32r killed $eax, 1, killed $ebp, 0, $noreg
- ; CHECK: $ebx = ADD32ri8 $ebx, -5, implicit-def $eflags
+ ; CHECK: $ebx = ADD32ri $ebx, -5, implicit-def $eflags
; CHECK: RET64 $ebx
$ebx = LEA32r killed $ebp, 1, killed $eax, -5, $noreg
RET64 $ebx
@@ -531,7 +531,7 @@ body: |
; CHECK: bb.1:
; CHECK: liveins: $eax, $ebp, $ebx
; CHECK: $ebp = LEA32r killed $ebx, 4, killed $ebx, 0, $noreg
- ; CHECK: $ebp = ADD32ri8 $ebp, 5, implicit-def $eflags
+ ; CHECK: $ebp = ADD32ri $ebp, 5, implicit-def $eflags
; CHECK: RET64 $ebp
bb.0 (%ir-block.0):
liveins: $eax, $ebp, $ebx
diff --git a/llvm/test/CodeGen/X86/leaFixup64.mir b/llvm/test/CodeGen/X86/leaFixup64.mir
index fe5c414a56f9..00b880ade320 100644
--- a/llvm/test/CodeGen/X86/leaFixup64.mir
+++ b/llvm/test/CodeGen/X86/leaFixup64.mir
@@ -186,7 +186,7 @@ body: |
; CHECK-LABEL: name: testleaadd_64_32_1
; CHECK: liveins: $rax, $rbp
; CHECK: $eax = ADD32rr $eax, $ebp, implicit-def $eflags, implicit $rax, implicit $rbp
- ; CHECK: $eax = ADD32ri8 $eax, -5, implicit-def $eflags
+ ; CHECK: $eax = ADD32ri $eax, -5, implicit-def $eflags
; CHECK: RET64 $eax
$eax = LEA64_32r killed $rax, 1, killed $rbp, -5, $noreg
RET64 $eax
@@ -224,7 +224,7 @@ body: |
; CHECK-LABEL: name: testleaadd_rbp_64_32_1
; CHECK: liveins: $rax, $rbp
; CHECK: $ebp = ADD32rr $ebp, $eax, implicit-def $eflags, implicit $rbp, implicit $rax
- ; CHECK: $ebp = ADD32ri8 $ebp, -5, implicit-def $eflags
+ ; CHECK: $ebp = ADD32ri $ebp, -5, implicit-def $eflags
; CHECK: RET64 $ebp
$ebp = LEA64_32r killed $rbp, 1, killed $rax, -5, $noreg
RET64 $ebp
@@ -299,7 +299,7 @@ body: |
; CHECK-LABEL: name: test2add_64
; CHECK: liveins: $rax, $rbp
; CHECK: $rax = ADD64rr $rax, $rbp, implicit-def $eflags
- ; CHECK: $rax = ADD64ri8 $rax, -5, implicit-def $eflags
+ ; CHECK: $rax = ADD64ri32 $rax, -5, implicit-def $eflags
; CHECK: RET64 $eax
$rax = LEA64r killed $rax, 1, killed $rbp, -5, $noreg
RET64 $eax
@@ -337,7 +337,7 @@ body: |
; CHECK-LABEL: name: test2add_rbp_64
; CHECK: liveins: $rax, $rbp
; CHECK: $rbp = ADD64rr $rbp, $rax, implicit-def $eflags
- ; CHECK: $rbp = ADD64ri8 $rbp, -5, implicit-def $eflags
+ ; CHECK: $rbp = ADD64ri32 $rbp, -5, implicit-def $eflags
; CHECK: RET64 $ebp
$rbp = LEA64r killed $rbp, 1, killed $rax, -5, $noreg
RET64 $ebp
@@ -413,7 +413,7 @@ body: |
; CHECK-LABEL: name: testleaadd_64_32
; CHECK: liveins: $rax, $rbp
; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg
- ; CHECK: $ebx = ADD32ri8 $ebx, -5, implicit-def $eflags
+ ; CHECK: $ebx = ADD32ri $ebx, -5, implicit-def $eflags
; CHECK: RET64 $ebx
$ebx = LEA64_32r killed $rax, 1, killed $rbp, -5, $noreg
RET64 $ebx
@@ -452,7 +452,7 @@ body: |
; CHECK-LABEL: name: testleaadd_rbp_64_32
; CHECK: liveins: $rax, $rbp
; CHECK: $ebx = LEA64_32r killed $rax, 1, killed $rbp, 0, $noreg
- ; CHECK: $ebx = ADD32ri8 $ebx, -5, implicit-def $eflags
+ ; CHECK: $ebx = ADD32ri $ebx, -5, implicit-def $eflags
; CHECK: RET64 $ebx
$ebx = LEA64_32r killed $rbp, 1, killed $rax, -5, $noreg
RET64 $ebx
@@ -529,7 +529,7 @@ body: |
; CHECK-LABEL: name: testleaadd_64
; CHECK: liveins: $rax, $rbp
; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg
- ; CHECK: $rbx = ADD64ri8 $rbx, -5, implicit-def $eflags
+ ; CHECK: $rbx = ADD64ri32 $rbx, -5, implicit-def $eflags
; CHECK: RET64 $ebx
$rbx = LEA64r killed $rax, 1, killed $rbp, -5, $noreg
RET64 $ebx
@@ -568,7 +568,7 @@ body: |
; CHECK-LABEL: name: testleaadd_rbp_64
; CHECK: liveins: $rax, $rbp
; CHECK: $rbx = LEA64r killed $rax, 1, killed $rbp, 0, $noreg
- ; CHECK: $rbx = ADD64ri8 $rbx, -5, implicit-def $eflags
+ ; CHECK: $rbx = ADD64ri32 $rbx, -5, implicit-def $eflags
; CHECK: RET64 $ebx
$rbx = LEA64r killed $rbp, 1, killed $rax, -5, $noreg
RET64 $ebx
@@ -1026,7 +1026,7 @@ body: |
; CHECK: bb.1:
; CHECK: liveins: $rax, $rbp, $rbx
; CHECK: $rbp = LEA64r killed $rbx, 4, killed $rbx, 0, $noreg
- ; CHECK: $rbp = ADD64ri8 $rbp, 5, implicit-def $eflags
+ ; CHECK: $rbp = ADD64ri32 $rbp, 5, implicit-def $eflags
; CHECK: RET64 $ebp
bb.0 (%ir-block.0):
liveins: $rax, $rbp, $rbx
@@ -1115,7 +1115,7 @@ body: |
; CHECK: bb.1:
; CHECK: liveins: $rax, $rbp, $rbx
; CHECK: $ebp = LEA64_32r killed $rbx, 4, killed $rbx, 0, $noreg
- ; CHECK: $ebp = ADD32ri8 $ebp, 5, implicit-def $eflags
+ ; CHECK: $ebp = ADD32ri $ebp, 5, implicit-def $eflags
; CHECK: RET64 $ebp
bb.0 (%ir-block.0):
liveins: $rax, $rbp, $rbx
diff --git a/llvm/test/CodeGen/X86/limit-split-cost.mir b/llvm/test/CodeGen/X86/limit-split-cost.mir
index 1fec3d53c0bb..6f5329e5b332 100644
--- a/llvm/test/CodeGen/X86/limit-split-cost.mir
+++ b/llvm/test/CodeGen/X86/limit-split-cost.mir
@@ -101,14 +101,14 @@ body: |
successors: %bb.6(0x20000000), %bb.2(0x60000000)
INLINEASM &"", 1, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15, 12, implicit-def dead early-clobber $eflags, !3
- CMP32ri8 %0, 2, implicit-def $eflags
+ CMP32ri %0, 2, implicit-def $eflags
JCC_1 %bb.6, 4, implicit killed $eflags
JMP_1 %bb.2
bb.2.do.body:
successors: %bb.5(0x2aaaaaab), %bb.3(0x55555555)
- CMP32ri8 %0, 1, implicit-def $eflags
+ CMP32ri %0, 1, implicit-def $eflags
JCC_1 %bb.5, 4, implicit killed $eflags
JMP_1 %bb.3
@@ -140,7 +140,7 @@ body: |
bb.7.do.cond:
successors: %bb.8(0x04000000), %bb.1(0x7c000000)
- CMP32mi8 %6, 1, $noreg, 0, $noreg, 5, implicit-def $eflags :: (dereferenceable load (s32) from @m, !tbaa !4)
+ CMP32mi %6, 1, $noreg, 0, $noreg, 5, implicit-def $eflags :: (dereferenceable load (s32) from @m, !tbaa !4)
JCC_1 %bb.1, 5, implicit killed $eflags
JMP_1 %bb.8
diff --git a/llvm/test/CodeGen/X86/machinesink-debug-inv-0.mir b/llvm/test/CodeGen/X86/machinesink-debug-inv-0.mir
index ca8764618b85..643c557db77f 100644
--- a/llvm/test/CodeGen/X86/machinesink-debug-inv-0.mir
+++ b/llvm/test/CodeGen/X86/machinesink-debug-inv-0.mir
@@ -73,7 +73,7 @@ body: |
; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.2(0x50000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI:%[0-9]+]]:gr8 = PHI [[MOV8ri]], %bb.0, %8, %bb.3
- ; CHECK-NEXT: CMP32mi8 [[MOV64rm]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
+ ; CHECK-NEXT: CMP32mi [[MOV64rm]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
; CHECK-NEXT: JCC_1 %bb.3, 4, implicit $eflags
; CHECK-NEXT: JMP_1 %bb.2
; CHECK-NEXT: {{ $}}
@@ -110,7 +110,7 @@ body: |
successors: %bb.3(0x30000000), %bb.2(0x50000000)
%0:gr8 = PHI %2, %bb.0, %8, %bb.3
- CMP32mi8 %3, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
+ CMP32mi %3, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
%1:gr32 = MOV32rm %4, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @e, align 1)
JCC_1 %bb.3, 4, implicit $eflags
JMP_1 %bb.2
diff --git a/llvm/test/CodeGen/X86/optimize-compare.mir b/llvm/test/CodeGen/X86/optimize-compare.mir
index b1b1fdeff470..36ab851a8515 100644
--- a/llvm/test/CodeGen/X86/optimize-compare.mir
+++ b/llvm/test/CodeGen/X86/optimize-compare.mir
@@ -27,7 +27,7 @@ body: |
%0:gr64 = COPY $rsi
%1:gr64 = DEC64r %0, implicit-def $eflags
; CMP should be removed.
- CMP64ri8 %1, 0, implicit-def $eflags
+ CMP64ri32 %1, 0, implicit-def $eflags
%2:gr64 = LEA64r %1, 5, $noreg, 12, $noreg
$al = SETCCr 4, implicit $eflags
...
@@ -228,15 +228,15 @@ body: |
; CHECK-LABEL: name: opt_redundant_flags_cmp_cmp_2
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; CHECK-NEXT: CMP64ri8 [[COPY]], 15, implicit-def $eflags
+ ; CHECK-NEXT: CMP64ri32 [[COPY]], 15, implicit-def $eflags
; CHECK-NEXT: $cl = SETCCr 2, implicit $eflags
; CHECK-NEXT: $bl = SETCCr 2, implicit $eflags
%0:gr64 = COPY $rsi
%1:gr64 = COPY $rdi
- CMP64ri8 %0, 15, implicit-def $eflags
+ CMP64ri32 %0, 15, implicit-def $eflags
$cl = SETCCr 2, implicit $eflags
; 2nd CMP should be removed.
- CMP64ri8 %0, 15, implicit-def $eflags
+ CMP64ri32 %0, 15, implicit-def $eflags
$bl = SETCCr 2, implicit $eflags
...
---
@@ -316,11 +316,11 @@ body: |
bb.0:
; CHECK-LABEL: name: opt_redundant_flags_cmp_test
; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $esi
- ; CHECK-NEXT: CMP32ri8 [[COPY]], 0, implicit-def $eflags
+ ; CHECK-NEXT: CMP32ri [[COPY]], 0, implicit-def $eflags
; CHECK-NEXT: $cl = SETCCr 2, implicit $eflags
; CHECK-NEXT: $bl = SETCCr 2, implicit $eflags
%0:gr32 = COPY $esi
- CMP32ri8 %0, 0, implicit-def $eflags
+ CMP32ri %0, 0, implicit-def $eflags
$cl = SETCCr 2, implicit $eflags
; TEST should be removed
TEST32rr %0, %0, implicit-def $eflags
@@ -339,7 +339,7 @@ body: |
TEST32rr %0, %0, implicit-def $eflags
$cl = SETCCr 2, implicit $eflags
; TEST should be removed
- CMP32ri8 %0, 0, implicit-def $eflags
+ CMP32ri %0, 0, implicit-def $eflags
$bl = SETCCr 2, implicit $eflags
...
---
@@ -385,7 +385,7 @@ body: |
bb.0:
; CHECK-LABEL: name: opt_redundant_flags_adjusted_imm_0
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
- ; CHECK-NEXT: CMP64ri8 [[COPY]], 1, implicit-def $eflags
+ ; CHECK-NEXT: CMP64ri32 [[COPY]], 1, implicit-def $eflags
; CHECK-NEXT: $cl = SETCCr 4, implicit $eflags
; CHECK-NEXT: $bl = SETCCr 15, implicit $eflags
; CHECK-NEXT: $bl = SETCCr 7, implicit $eflags
@@ -393,10 +393,10 @@ body: |
; CHECK-NEXT: $bl = SETCCr 6, implicit $eflags
%0:gr64 = COPY $rsi
; CMP+SETCC %0 == 1
- CMP64ri8 %0, 1, implicit-def $eflags
+ CMP64ri32 %0, 1, implicit-def $eflags
$cl = SETCCr 4, implicit $eflags
; CMP+SETCC %0 >= 2; CMP can be removed.
- CMP64ri8 %0, 2, implicit-def $eflags
+ CMP64ri32 %0, 2, implicit-def $eflags
; %0 >=s 2 --> %0 >s 1
$bl = SETCCr 13, implicit $eflags
; %0 >=u 2 --> %0 >u 1
@@ -412,7 +412,7 @@ body: |
bb.0:
; CHECK-LABEL: name: opt_redundant_flags_adjusted_imm_1
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
- ; CHECK-NEXT: CMP64ri8 [[COPY]], 42, implicit-def $eflags
+ ; CHECK-NEXT: CMP64ri32 [[COPY]], 42, implicit-def $eflags
; CHECK-NEXT: $cl = SETCCr 5, implicit $eflags
; CHECK-NEXT: $bl = SETCCr 13, implicit $eflags
; CHECK-NEXT: $bl = SETCCr 3, implicit $eflags
@@ -420,10 +420,10 @@ body: |
; CHECK-NEXT: $bl = SETCCr 2, implicit $eflags
%0:gr64 = COPY $rsi
; CMP+SETCC %0 != 42
- CMP64ri8 %0, 42, implicit-def $eflags
+ CMP64ri32 %0, 42, implicit-def $eflags
$cl = SETCCr 5, implicit $eflags
; CMP+SETCC %0 >= 2; CMP can be removed.
- CMP64ri8 %0, 41, implicit-def $eflags
+ CMP64ri32 %0, 41, implicit-def $eflags
; %0 >s 41 --> %0 >=s 42
$bl = SETCCr 15, implicit $eflags
; %0 >u 41 --> %0 >=u 42
@@ -484,16 +484,16 @@ body: |
bb.0:
; CHECK-LABEL: name: opt_redundant_flags_adjusted_imm_noopt_0
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
- ; CHECK-NEXT: CMP64ri8 [[COPY]], 42, implicit-def $eflags
+ ; CHECK-NEXT: CMP64ri32 [[COPY]], 42, implicit-def $eflags
; CHECK-NEXT: $cl = SETCCr 4, implicit $eflags
- ; CHECK-NEXT: CMP64ri8 [[COPY]], 41, implicit-def $eflags
+ ; CHECK-NEXT: CMP64ri32 [[COPY]], 41, implicit-def $eflags
; CHECK-NEXT: $bl = SETCCr 4, implicit $eflags
%0:gr64 = COPY $rsi
; CMP+SETCC %0 <s 1
- CMP64ri8 %0, 42, implicit-def $eflags
+ CMP64ri32 %0, 42, implicit-def $eflags
$cl = SETCCr 4, implicit $eflags
; CMP should not be removed.
- CMP64ri8 %0, 41, implicit-def $eflags
+ CMP64ri32 %0, 41, implicit-def $eflags
; %0 == 41
$bl = SETCCr 4, implicit $eflags
...
diff --git a/llvm/test/CodeGen/X86/peephole-fold-testrr.mir b/llvm/test/CodeGen/X86/peephole-fold-testrr.mir
index 99df2bc0b343..caaf9fe50b73 100644
--- a/llvm/test/CodeGen/X86/peephole-fold-testrr.mir
+++ b/llvm/test/CodeGen/X86/peephole-fold-testrr.mir
@@ -39,7 +39,7 @@ body: |
; CHECK-LABEL: name: atomic
; CHECK: liveins: $rdi
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; CHECK: CMP64mi8 [[COPY]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load unordered (s64) from %ir.arg)
+ ; CHECK: CMP64mi32 [[COPY]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load unordered (s64) from %ir.arg)
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 killed [[SETCCr]]
; CHECK: $eax = COPY [[MOVZX32rr8_]]
@@ -72,7 +72,7 @@ body: |
; CHECK-LABEL: name: nonatomic_unoptimized
; CHECK: liveins: $rdi
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; CHECK: CMP64mi8 [[COPY]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load (s64) from %ir.arg)
+ ; CHECK: CMP64mi32 [[COPY]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load (s64) from %ir.arg)
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 killed [[SETCCr]]
; CHECK: $eax = COPY [[MOVZX32rr8_]]
diff --git a/llvm/test/CodeGen/X86/pr46827.ll b/llvm/test/CodeGen/X86/pr46827.ll
index f5d496fdddf5..fa3ff064ea11 100644
--- a/llvm/test/CodeGen/X86/pr46827.ll
+++ b/llvm/test/CodeGen/X86/pr46827.ll
@@ -4,7 +4,7 @@
; CHECK: bb.0.bb107:
; CHECK: successors: %bb.3(0x40000000), %bb.4(0x40000000)
; CHECK: %0:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.0, align 16)
-; CHECK: %1:gr32 = SUB32ri8 %0, 1, implicit-def $eflags
+; CHECK: %1:gr32 = SUB32ri %0, 1, implicit-def $eflags
; CHECK: XBEGIN_4 %bb.4, implicit-def $eax
; CHECK: bb.3.bb107:
; CHECK: successors: %bb.5(0x80000000)
diff --git a/llvm/test/CodeGen/X86/push-cfi.ll b/llvm/test/CodeGen/X86/push-cfi.ll
index a7fcfc6f219d..7bbd5142e739 100644
--- a/llvm/test/CodeGen/X86/push-cfi.ll
+++ b/llvm/test/CodeGen/X86/push-cfi.ll
@@ -28,7 +28,7 @@ declare void @empty()
; DARWIN-NOT: pushl
; PEI-LABEL: name: test1_nofp
-; PEI: $esp = frame-setup SUB32ri8 $esp, 12, implicit-def dead $eflags
+; PEI: $esp = frame-setup SUB32ri $esp, 12, implicit-def dead $eflags
; PEI-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; PEI-NOT: frame-setup CFI_INSTRUCTION
; PEI: ...
diff --git a/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll b/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
index 2a98e3dd91ee..bd09b0388391 100644
--- a/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
+++ b/llvm/test/CodeGen/X86/statepoint-cmp-sunk-past-statepoint.ll
@@ -62,7 +62,7 @@ zero:
; CHECK: bb.5
; CHECK: %3:gr64 = COPY %10
; CHECK-LV: %4:gr64 = COPY killed %10
-; CHECK-LV: %4:gr64 = nuw ADD64ri8 %4, 8, implicit-def dead $eflags
+; CHECK-LV: %4:gr64 = nuw ADD64ri32 %4, 8, implicit-def dead $eflags
; CHECK-LIS: %4:gr64 = LEA64r %10, 1, $noreg, 8, $noreg
; CHECK: TEST64rr killed %1, %1, implicit-def $eflags
; CHECK: JCC_1 %bb.1, 5, implicit killed $eflags
diff --git a/llvm/test/CodeGen/X86/switch-bit-test-unreachable-default.ll b/llvm/test/CodeGen/X86/switch-bit-test-unreachable-default.ll
index e0841eff1087..07f87b6db4c6 100644
--- a/llvm/test/CodeGen/X86/switch-bit-test-unreachable-default.ll
+++ b/llvm/test/CodeGen/X86/switch-bit-test-unreachable-default.ll
@@ -40,7 +40,7 @@ define i32 @baz(i32 %0) {
; CHECK-GISEL: %0:gr32 = COPY $edi
; CHECK-GISEL: %10:gr32 = MOV32ri 1
; CHECK-GISEL: %11:gr32 = MOV32r0 implicit-def $eflags
-; CHECK-GISEL: %2:gr32 = SUB32ri8 %0:gr32(tied-def 0), 0, implicit-def $eflags
+; CHECK-GISEL: %2:gr32 = SUB32ri %0:gr32(tied-def 0), 0, implicit-def $eflags
; CHECK-GISEL: bb.5 (%ir-block.1):
; CHECK-GISEL: ; predecessors: %bb.1
; CHECK-GISEL: successors: %bb.4(0x55555555), %bb.2(0x2aaaaaab); %bb.4(66.67%), %bb.2(33.33%)
diff --git a/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll b/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll
index bdf07aea8489..8e9c112d0523 100644
--- a/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll
+++ b/llvm/test/CodeGen/X86/switch-lower-peel-top-case.ll
@@ -32,7 +32,7 @@ entry:
; CHECK: JMP_1 %[[BB4_LABEL]]
; CHECK: [[BB4_LABEL:.*]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 8, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 8, implicit-def $eflags
; CHECK: JCC_1 %[[CASE1_LABEL]], 4, implicit $eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
@@ -74,38 +74,38 @@ entry:
; CHECK: successors: %[[PEELED_CASE_LABEL:.*]](0x59999999), %[[PEELED_SWITCH_LABEL:.*]](0x26666667)
; CHECK: %[[VAL:[0-9]+]]:gr32 = COPY $edi
-; CHECK: %{{[0-9]+}}:gr32 = ADD32ri8 %{{[0-9]+}}, -85, implicit-def dead $eflags
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %{{[0-9]+}}, 2, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = ADD32ri %{{[0-9]+}}, -85, implicit-def dead $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %{{[0-9]+}}, 2, implicit-def $eflags
; CHECK: JCC_1 %[[PEELED_CASE_LABEL]], 2, implicit $eflags
; CHECK: JMP_1 %[[PEELED_SWITCH_LABEL]]
; CHECK: [[PEELED_SWITCH_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[BB1_LABEL:.*]](0x0088888a), %[[BB2_LABEL:.*]](0x7f777776)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 4, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 4, implicit-def $eflags
; CHECK: JCC_1 %[[BB2_LABEL]], 15, implicit $eflags
; CHECK: JMP_1 %[[BB1_LABEL]]
; CHECK: [[BB1_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE4_LABEL:.*]](0x7f775a4f), %[[BB3_LABEL:.*]](0x0088a5b1)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 1, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 1, implicit-def $eflags
; CHECK: JCC_1 %[[CASE4_LABEL]], 4, implicit $eflags
; CHECK: JMP_1 %[[BB3_LABEL]]
; CHECK: [[BB3_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE1_LABEL:.*]](0x66666666), %[[DEFAULT_BB_LABEL:.*]](0x1999999a)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], -40, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], -40, implicit-def $eflags
; CHECK: JCC_1 %[[CASE1_LABEL]], 4, implicit $eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
; CHECK: [[BB2_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE5_LABEL:.*]](0x00000000), %[[BB4_LABEL:.*]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 5, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 5, implicit-def $eflags
; CHECK: JCC_1 %[[CASE5_LABEL]], 4, implicit $eflags
; CHECK: JMP_1 %[[BB4_LABEL]]
; CHECK: [[BB4_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE6_LABEL:.*]](0x00000000), %[[BB5_LABEL:.*]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 7, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 7, implicit-def $eflags
; CHECK: JCC_1 %[[CASE6_LABEL]], 4, implicit $eflags
; CHECK: JMP_1 %[[BB5_LABEL]]
; CHECK: [[BB5_LABEL]].{{[a-zA-Z0-9.]+}}:
; CHECK: successors: %[[CASE7_LABEL:.*]](0x00000000), %[[DEFAULT_BB_LABEL]](0x80000000)
-; CHECK: %{{[0-9]+}}:gr32 = SUB32ri8 %[[VAL]], 49, implicit-def $eflags
+; CHECK: %{{[0-9]+}}:gr32 = SUB32ri %[[VAL]], 49, implicit-def $eflags
; CHECK: JCC_1 %[[CASE7_LABEL]], 4, implicit $eflags
; CHECK: JMP_1 %[[DEFAULT_BB_LABEL]]
diff --git a/llvm/test/CodeGen/X86/tail-call-conditional.mir b/llvm/test/CodeGen/X86/tail-call-conditional.mir
index 05748f0a2eae..b34aeb78aec0 100644
--- a/llvm/test/CodeGen/X86/tail-call-conditional.mir
+++ b/llvm/test/CodeGen/X86/tail-call-conditional.mir
@@ -37,7 +37,7 @@ body: |
liveins: $rdi, $rsi
$rax = COPY $rdi
- CMP64ri8 $rax, 99, implicit-def $eflags
+ CMP64ri32 $rax, 99, implicit-def $eflags
JCC_1 %bb.4, 7, implicit $eflags
JMP_1 %bb.1
@@ -47,14 +47,14 @@ body: |
; CHECK-NEXT: {{^ $}}
; CHECK-NEXT: $rdi = COPY $rsi
; CHECK-NEXT: $rsi = COPY $rax
- ; CHECK-NEXT: CMP64ri8 $rax, 9, implicit-def $eflags
+ ; CHECK-NEXT: CMP64ri32 $rax, 9, implicit-def $eflags
; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 6, csr_64, implicit $rsp, implicit $eflags, implicit $ssp, implicit $rsp, implicit $rdi, implicit $rsi, implicit $rdi, implicit-def $rdi, implicit $hsi, implicit-def $hsi, implicit $sih, implicit-def $sih, implicit $sil, implicit-def $sil, implicit $si, implicit-def $si, implicit $esi, implicit-def $esi, implicit $rsi, implicit-def $rsi, implicit $hdi, implicit-def $hdi, implicit $dih, implicit-def $dih, implicit $dil, implicit-def $dil, implicit $di, implicit-def $di, implicit $edi, implicit-def $edi
bb.1:
successors: %bb.2, %bb.3
liveins: $rax, $rsi
- CMP64ri8 $rax, 9, implicit-def $eflags
+ CMP64ri32 $rax, 9, implicit-def $eflags
JCC_1 %bb.3, 7, implicit $eflags
JMP_1 %bb.2
diff --git a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
index ac2e728ab5d0..ee7bf18e9cdc 100644
--- a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
+++ b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir
@@ -19,7 +19,7 @@ body: |
; CHECK: JCC_1 %bb.1, 4, implicit $eflags
; CHECK: bb.3:
; CHECK: successors: %bb.6(0x30000000), %bb.4(0x50000000)
- ; CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64))
+ ; CHECK: CMP64mi32 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64))
; CHECK: JCC_1 %bb.6, 4, implicit $eflags
; CHECK: bb.4:
; CHECK: $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
@@ -32,7 +32,7 @@ body: |
; CHECK: JCC_1 %bb.1, 4, implicit $eflags
; CHECK: bb.7 (align 16):
; CHECK: successors: %bb.8(0x71555555), %bb.4(0x0eaaaaab)
- ; CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64)), (load (s64))
+ ; CHECK: CMP64mi32 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64)), (load (s64))
; CHECK: JCC_1 %bb.4, 5, implicit $eflags
; CHECK: bb.8:
; CHECK: successors: %bb.1(0x04000000), %bb.7(0x7c000000)
@@ -70,7 +70,7 @@ body: |
bb.9:
successors: %bb.10(0x30000000), %bb.15(0x50000000)
- CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64))
+ CMP64mi32 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64))
JCC_1 %bb.15, 5, implicit $eflags
bb.10:
@@ -89,7 +89,7 @@ body: |
bb.12:
successors: %bb.13(0x71555555), %bb.15(0x0eaaaaab)
- CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64)), (load (s64))
+ CMP64mi32 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load (s64)), (load (s64))
JCC_1 %bb.15, 5, implicit $eflags
bb.13:
diff --git a/llvm/test/CodeGen/X86/throws-cfi-fp.ll b/llvm/test/CodeGen/X86/throws-cfi-fp.ll
index 294355e965b8..1ad12823ec67 100644
--- a/llvm/test/CodeGen/X86/throws-cfi-fp.ll
+++ b/llvm/test/CodeGen/X86/throws-cfi-fp.ll
@@ -30,7 +30,7 @@ define void @_Z6throwsv() #0 personality ptr @__gxx_personality_v0 {
; PEI-NEXT: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
; PEI-NEXT: {{^ +}}CFI_INSTRUCTION offset $rbx, -24
; PEI: bb.4.try.cont:
-; PEI-NEXT: $rsp = frame-destroy ADD64ri8 $rsp, 8, implicit-def dead $eflags
+; PEI-NEXT: $rsp = frame-destroy ADD64ri32 $rsp, 8, implicit-def dead $eflags
; PEI-NEXT: $rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
; PEI-NEXT: $rbp = frame-destroy POP64r implicit-def $rsp, implicit $rsp
; PEI-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $rsp, 8
diff --git a/llvm/test/CodeGen/X86/twoaddr-dbg-value.mir b/llvm/test/CodeGen/X86/twoaddr-dbg-value.mir
index b575852d4ba3..d931ed04e53a 100644
--- a/llvm/test/CodeGen/X86/twoaddr-dbg-value.mir
+++ b/llvm/test/CodeGen/X86/twoaddr-dbg-value.mir
@@ -8,7 +8,7 @@ body: |
%0:gr32 = COPY killed $edi
%1:gr32 = COPY killed %0
- %4:gr32 = XOR32ri8 %1, 1, implicit-def dead $eflags
+ %4:gr32 = XOR32ri %1, 1, implicit-def dead $eflags
DBG_VALUE %4
%5:gr32 = COPY %4
PUSH32r killed %1, implicit-def $esp, implicit $esp
@@ -22,6 +22,6 @@ body: |
# CHECK: PUSH32r %1, implicit-def $esp, implicit $esp
# CHECK-NEXT: %2:gr32 = COPY killed %1
-# CHECK-NEXT: %2:gr32 = XOR32ri8 %2, 1, implicit-def dead $eflags
+# CHECK-NEXT: %2:gr32 = XOR32ri %2, 1, implicit-def dead $eflags
# CHECK-NEXT: DBG_VALUE %2
# CHECK-NEXT: %3:gr32 = COPY killed %2
diff --git a/llvm/test/CodeGen/X86/update-terminator-debugloc.ll b/llvm/test/CodeGen/X86/update-terminator-debugloc.ll
index 828268580b40..7c592ef31878 100644
--- a/llvm/test/CodeGen/X86/update-terminator-debugloc.ll
+++ b/llvm/test/CodeGen/X86/update-terminator-debugloc.ll
@@ -26,7 +26,7 @@
; CHECK: SUB64rr [[VREG2]], [[VREG1]]
; CHECK-NEXT: JCC_1 {{.*}}, debug-location [[DLOC]]{{$}}
; CHECK: [[VREG3:%[^ ]+]]:gr64 = PHI [[VREG2]]
-; CHECK: [[VREG4:%[^ ]+]]:gr64 = nuw ADD64ri8 [[VREG3]], 4
+; CHECK: [[VREG4:%[^ ]+]]:gr64 = nuw ADD64ri32 [[VREG3]], 4
; CHECK: SUB64rr [[VREG4]], [[VREG1]]
; CHECK-NEXT: JCC_1 {{.*}}, debug-location [[DLOC]]{{$}}
; CHECK-NEXT: JMP_1 {{.*}}, debug-location [[DLOC]]{{$}}
diff --git a/llvm/test/CodeGen/X86/vecloadextract.ll b/llvm/test/CodeGen/X86/vecloadextract.ll
index ff2e2ba30eec..ad5fc22b1e6a 100644
--- a/llvm/test/CodeGen/X86/vecloadextract.ll
+++ b/llvm/test/CodeGen/X86/vecloadextract.ll
@@ -20,7 +20,7 @@ define i32 @const_index(ptr %v) {
; CHECK: name: variable_index
; CHECK: bb.0 (%ir-block.0):
; CHECK: [[INDEX:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.0)
-; CHECK: [[MASKED_INDEX:%[0-9]+]]:gr32_nosp = AND32ri8 [[INDEX]], 7, implicit-def dead $eflags
+; CHECK: [[MASKED_INDEX:%[0-9]+]]:gr32_nosp = AND32ri [[INDEX]], 7, implicit-def dead $eflags
; CHECK: [[POINTER:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.1)
; CHECK: [[LOAD:%[0-9]+]]:gr32 = MOV32rm killed [[POINTER]], 4, killed [[MASKED_INDEX]], 0, $noreg :: (load (s32))
; CHECK: $eax = COPY [[LOAD]]
@@ -34,7 +34,7 @@ define i32 @variable_index(ptr %v, i32 %i) {
; CHECK: name: variable_index_with_addrspace
; CHECK: bb.0 (%ir-block.0):
; CHECK: [[INDEX:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.0)
-; CHECK: [[MASKED_INDEX:%[0-9]+]]:gr32_nosp = AND32ri8 [[INDEX]], 7, implicit-def dead $eflags
+; CHECK: [[MASKED_INDEX:%[0-9]+]]:gr32_nosp = AND32ri [[INDEX]], 7, implicit-def dead $eflags
; CHECK: [[POINTER:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (load (s32) from %fixed-stack.1)
; CHECK: [[LOAD:%[0-9]+]]:gr32 = MOV32rm killed [[POINTER]], 4, killed [[MASKED_INDEX]], 0, $noreg :: (load (s32), addrspace 1)
; CHECK: $eax = COPY [[LOAD]]
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir b/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
index 449ecba16cf4..56958efcefb8 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
@@ -150,7 +150,7 @@ body: |
%1:gr32 = PHI %27, %bb.3, %24, %bb.4, debug-location !9
%25:gr32 = nsw ADD32rr %1, %0, implicit-def dead $eflags, debug-location !9
- CMP32ri8 killed %25, 6, implicit-def $eflags, debug-location !9
+ CMP32ri killed %25, 6, implicit-def $eflags, debug-location !9
JCC_1 %bb.3, 7, implicit killed $eflags, debug-location !9
JMP_1 %bb.6, debug-location !9
@@ -180,7 +180,7 @@ body: |
bb.9:
successors: %bb.10(0x80000000), %bb.12(0x00000000)
- CMP32ri8 undef %32:gr32, 16, implicit-def $eflags, debug-location !9
+ CMP32ri undef %32:gr32, 16, implicit-def $eflags, debug-location !9
JCC_1 %bb.12, 15, implicit killed $eflags, debug-location !9
JMP_1 %bb.10, debug-location !9
@@ -273,8 +273,8 @@ body: |
CALL64r undef %55:gr64, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit killed $esi, implicit killed $edx, implicit killed $ecx, implicit killed $r8, implicit-def $rsp, implicit-def $ssp, debug-location !9
ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp, debug-location !9
%13:gr64 = nuw nsw INC64r killed %3, implicit-def dead $eflags, debug-location !9
- %14:gr32 = ADD32ri8 killed %2, -8, implicit-def dead $eflags, debug-location !9
- CMP32ri8 %14, 8, implicit-def $eflags, debug-location !9
+ %14:gr32 = ADD32ri killed %2, -8, implicit-def dead $eflags, debug-location !9
+ CMP32ri %14, 8, implicit-def $eflags, debug-location !9
JCC_1 %bb.13, 15, implicit killed $eflags, debug-location !9
JMP_1 %bb.3, debug-location !9
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/twoaddr-to-threeaddr-sub.mir b/llvm/test/DebugInfo/MIR/InstrRef/twoaddr-to-threeaddr-sub.mir
index 0b1b9b9957ee..d40d3e1ff55f 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/twoaddr-to-threeaddr-sub.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/twoaddr-to-threeaddr-sub.mir
@@ -34,7 +34,7 @@ body: |
%0:gr32 = COPY killed $edi
%1:gr32 = SHL32ri killed %0, 5, implicit-def dead $eflags
- %2:gr32 = ADD32ri8_DB killed %1, 3, implicit-def dead $eflags, debug-instr-number 1
+ %2:gr32 = ADD32ri_DB killed %1, 3, implicit-def dead $eflags, debug-instr-number 1
DBG_INSTR_REF dbg-instr-ref(1, 0)
$eax = COPY killed %2
RET 0, killed $eax
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup-2.mir b/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup-2.mir
index 778e1f8fa442..5e088d3f4600 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup-2.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup-2.mir
@@ -17,7 +17,7 @@ body: |
bb.0:
liveins: $eax, $ebp
- ; CHECK: $eax = ADD32ri8 {{.*}} debug-instr-number 2
+ ; CHECK: $eax = ADD32ri {{.*}} debug-instr-number 2
$eax = LEA32r killed $eax, 1, killed $ebp, -5, $noreg, debug-instr-number 1
RET64 $eax
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup.mir b/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup.mir
index 75cc3d9480da..b84f86c032cc 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup.mir
@@ -49,9 +49,9 @@ body: |
renamable $edi = KILL $edi, implicit-def $rdi
renamable $ecx = nsw LEA64_32r renamable $rdi, 8, renamable $rdi, 42, $noreg, debug-instr-number 1
- ; HASWELL: ADD32ri8 {{.*}} debug-instr-number 3
+ ; HASWELL: ADD32ri {{.*}} debug-instr-number 3
renamable $eax = nsw LEA64_32r killed renamable $rdi, 4, renamable $rdi, 2, $noreg, debug-instr-number 2
- ; HASWELL: ADD32ri8 {{.*}} debug-instr-number 4
+ ; HASWELL: ADD32ri {{.*}} debug-instr-number 4
renamable $eax = nsw IMUL32rr killed renamable $eax, killed renamable $ecx, implicit-def dead $eflags
RET64 $eax
diff --git a/llvm/test/DebugInfo/MIR/X86/empty-inline.mir b/llvm/test/DebugInfo/MIR/X86/empty-inline.mir
index d2b057af17c3..ff88eee964ec 100644
--- a/llvm/test/DebugInfo/MIR/X86/empty-inline.mir
+++ b/llvm/test/DebugInfo/MIR/X86/empty-inline.mir
@@ -100,7 +100,7 @@ body: |
successors: %bb.1(0x30000000), %bb.2(0x50000000)
liveins: $rdi
- CMP64mi8 $rip, 1, _, @a, _, 0, implicit-def $eflags :: (dereferenceable load (s64) from @a, align 4)
+ CMP64mi32 $rip, 1, _, @a, _, 0, implicit-def $eflags :: (dereferenceable load (s64) from @a, align 4)
JCC_1 %bb.1, 4, implicit $eflags
bb.2 (%ir-block.5):
diff --git a/llvm/test/DebugInfo/MIR/X86/kill-after-spill.mir b/llvm/test/DebugInfo/MIR/X86/kill-after-spill.mir
index 8e743f745f74..88cef41ebb9b 100644
--- a/llvm/test/DebugInfo/MIR/X86/kill-after-spill.mir
+++ b/llvm/test/DebugInfo/MIR/X86/kill-after-spill.mir
@@ -333,14 +333,14 @@ body: |
successors: %bb.6(0x30000000), %bb.7(0x50000000)
liveins: $rax, $r12, $r15
- CMP32mi8 renamable $r15, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load (s32) from %ir.tot_perf2, align 8)
+ CMP32mi renamable $r15, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (load (s32) from %ir.tot_perf2, align 8)
JCC_1 %bb.7, 5, implicit $eflags, debug-location !57
bb.6.lor.lhs.false:
successors: %bb.8(0x30000000), %bb.7(0x50000000)
liveins: $rax, $r12, $r15
- CMP32mi8 killed renamable $r15, 1, $noreg, 4, $noreg, 0, implicit-def $eflags :: (load (s32) from %ir.tot_bw)
+ CMP32mi killed renamable $r15, 1, $noreg, 4, $noreg, 0, implicit-def $eflags :: (load (s32) from %ir.tot_bw)
JCC_1 %bb.8, 4, implicit $eflags, debug-location !57
bb.7.if.then14:
@@ -375,7 +375,7 @@ body: |
liveins: $ecx
$eax = MOV32rr killed $ecx
- $rsp = ADD64ri8 $rsp, 8, implicit-def dead $eflags
+ $rsp = ADD64ri32 $rsp, 8, implicit-def dead $eflags
$rbx = POP64r implicit-def $rsp, implicit $rsp
$r12 = POP64r implicit-def $rsp, implicit $rsp
$r13 = POP64r implicit-def $rsp, implicit $rsp
diff --git a/llvm/test/DebugInfo/MIR/X86/live-debug-values-stack-clobber.mir b/llvm/test/DebugInfo/MIR/X86/live-debug-values-stack-clobber.mir
index 763411aabea4..d5f6b2a10c98 100644
--- a/llvm/test/DebugInfo/MIR/X86/live-debug-values-stack-clobber.mir
+++ b/llvm/test/DebugInfo/MIR/X86/live-debug-values-stack-clobber.mir
@@ -161,7 +161,7 @@ body: |
$rsp = frame-setup SUB64ri8 $rsp, 24, implicit-def dead $eflags
renamable $ecx = MOV32rm $rip, 1, $noreg, @bees, $noreg, debug-location !28 :: (volatile dereferenceable load (s32) from @bees)
$eax = MOV32ri 3, implicit-def $rax
- CMP32ri8 killed renamable $ecx, 12, implicit-def $eflags, debug-location !28
+ CMP32ri killed renamable $ecx, 12, implicit-def $eflags, debug-location !28
JCC_1 %bb.4, 4, implicit $eflags, debug-location !28
bb.1.if.end:
@@ -172,7 +172,7 @@ body: |
MOV64mr $rsp, 1, $noreg, 8, $noreg, killed renamable $rdi :: (store (s64) into %stack.0)
INLINEASM &"", 1, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15, 12, implicit-def dead early-clobber $df, 12, implicit-def dead early-clobber $fpsw, 12, implicit-def dead early-clobber $eflags, debug-location !28
renamable $eax = MOV32rm $rip, 1, $noreg, @bees, $noreg, debug-location !28 :: (volatile dereferenceable load (s32) from @bees)
- CMP32ri8 killed renamable $eax, 2, implicit-def $eflags, debug-location !28
+ CMP32ri killed renamable $eax, 2, implicit-def $eflags, debug-location !28
JCC_1 %bb.3, 5, implicit killed $eflags, debug-location !28
bb.2.if.then2:
@@ -190,7 +190,7 @@ body: |
CALL64pcrel32 @sum, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit-def $rsp, implicit-def $ssp, implicit-def $rax, debug-location !28
MOV64mr $rsp, 1, $noreg, 8, $noreg, $rax :: (store (s64) into %stack.0)
INLINEASM &"", 1, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15, 12, implicit-def dead early-clobber $df, 12, implicit-def dead early-clobber $fpsw, 12, implicit-def dead early-clobber $eflags, debug-location !28
- ADD32mi8 $rip, 1, $noreg, @bees, $noreg, 1, implicit-def dead $eflags, debug-location !28 :: (volatile store (s32) into @bees), (volatile dereferenceable load (s32) from @bees)
+ ADD32mi $rip, 1, $noreg, @bees, $noreg, 1, implicit-def dead $eflags, debug-location !28 :: (volatile store (s32) into @bees), (volatile dereferenceable load (s32) from @bees)
renamable $rax = MOVSX64rm32 $rsp, 1, $noreg, 8, $noreg, debug-location !28 :: (load (s32) from %stack.0, align 8)
bb.4.return:
diff --git a/llvm/test/DebugInfo/MIR/X86/machinesink-subreg.mir b/llvm/test/DebugInfo/MIR/X86/machinesink-subreg.mir
index 9d60c1ab2e6e..56d1f6a3bbe5 100644
--- a/llvm/test/DebugInfo/MIR/X86/machinesink-subreg.mir
+++ b/llvm/test/DebugInfo/MIR/X86/machinesink-subreg.mir
@@ -69,7 +69,7 @@ body: |
; CHECK-NEXT: JMP_1
%2:gr64 = COPY $rdi
- %5:gr64 = ADD64ri8 %2, 1, implicit-def dead $eflags
+ %5:gr64 = ADD64ri32 %2, 1, implicit-def dead $eflags
CMP32ri $esi, 0, implicit-def $eflags
DBG_VALUE %5.sub_32bit, $noreg, !16, !13, debug-location !17
CMP32ri $esi, 0, implicit-def $eflags
@@ -80,7 +80,7 @@ body: |
bb.1.block1:
successors: %bb.2.exit
- %1:gr64 = ADD64ri8 %5, 4, implicit-def dead $eflags
+ %1:gr64 = ADD64ri32 %5, 4, implicit-def dead $eflags
JMP_1 %bb.2.exit
bb.2.exit:
diff --git a/llvm/test/DebugInfo/MIR/X86/machinesink.mir b/llvm/test/DebugInfo/MIR/X86/machinesink.mir
index 05f3743c1f2d..5d9d658afc43 100644
--- a/llvm/test/DebugInfo/MIR/X86/machinesink.mir
+++ b/llvm/test/DebugInfo/MIR/X86/machinesink.mir
@@ -121,9 +121,9 @@ body: |
; CHECK-LABEL: bb.1.nou:
; CHECK: [[SUNKVREG:%[0-9]+]]:gr64 = COPY [[ARG0VREG]]
; CHECK-NEXT: DBG_VALUE [[SUNKVREG]], $noreg, [[VARNUM]]
- ; CHECK-NEXT: ADD64ri8
+ ; CHECK-NEXT: ADD64ri32
; CHECK-NEXT: JMP_1
- %1:gr64 = ADD64ri8 %5, 4, implicit-def dead $eflags
+ %1:gr64 = ADD64ri32 %5, 4, implicit-def dead $eflags
JMP_1 %bb.2.exit
bb.2.exit:
@@ -161,7 +161,7 @@ body: |
; CHECK-NEXT: JMP_1
%2:gr64 = COPY $rdi
- %5:gr64 = ADD64ri8 %2, 1, implicit-def dead $eflags
+ %5:gr64 = ADD64ri32 %2, 1, implicit-def dead $eflags
CMP32ri $esi, 0, implicit-def $eflags
DBG_VALUE %5, $noreg, !103, !17, debug-location !104
CMP32ri $esi, 0, implicit-def $eflags
@@ -175,11 +175,11 @@ body: |
; This block should receive no DBG_VALUE.
; CHECK-LABEL: bb.1.block1:
; CHECK-NOT: DBG_VALUE
- ; CHECK: [[SUNKVREG2:%[0-9]+]]:gr64 = ADD64ri8 [[TEST2VREG]]
+ ; CHECK: [[SUNKVREG2:%[0-9]+]]:gr64 = ADD64ri32 [[TEST2VREG]]
; CHECK-NOT: DBG_VALUE
- ; CHECK-NEXT: ADD64ri8
+ ; CHECK-NEXT: ADD64ri32
; CHECK: JMP_1 %bb.2
- %1:gr64 = ADD64ri8 %5, 4, implicit-def dead $eflags
+ %1:gr64 = ADD64ri32 %5, 4, implicit-def dead $eflags
JMP_1 %bb.2.exit
bb.2.exit:
@@ -226,9 +226,9 @@ body: |
; CHECK-NOT: DBG_VALUE
; CHECK: COPY [[TEST3VREG]]
; CHECK-NOT: DBG_VALUE
- ; CHECK-NEXT: ADD64ri8
+ ; CHECK-NEXT: ADD64ri32
; CHECK: JMP_1 %bb.2
- %1:gr64 = ADD64ri8 %5, 4, implicit-def dead $eflags
+ %1:gr64 = ADD64ri32 %5, 4, implicit-def dead $eflags
JMP_1 %bb.2.exit
bb.2.exit:
diff --git a/llvm/test/DebugInfo/MIR/X86/merge-inline-loc4.mir b/llvm/test/DebugInfo/MIR/X86/merge-inline-loc4.mir
index 7ebeda4c7e82..24b17a628920 100644
--- a/llvm/test/DebugInfo/MIR/X86/merge-inline-loc4.mir
+++ b/llvm/test/DebugInfo/MIR/X86/merge-inline-loc4.mir
@@ -101,7 +101,7 @@ body: |
frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
frame-setup CFI_INSTRUCTION def_cfa_offset 16
renamable $eax = MOV32rm $rip, 1, $noreg, @q1, $noreg, debug-location !12 :: (dereferenceable load (s32) from @q1, !tbaa !13)
- CMP32ri8 renamable $eax, 4, implicit-def $eflags, debug-location !17
+ CMP32ri renamable $eax, 4, implicit-def $eflags, debug-location !17
JCC_1 %bb.2, 12, implicit killed $eflags, debug-location !20
JMP_1 %bb.1, debug-location !20
@@ -117,7 +117,7 @@ body: |
renamable $eax = nsw IMUL32rri killed renamable $eax, 152, implicit-def dead $eflags, debug-location !22
renamable $eax = nsw ADD32ri8 killed renamable $eax, 100, implicit-def dead $eflags, debug-location !23
MOV32mr $rip, 1, $noreg, @g1, $noreg, killed renamable $eax, debug-location !24 :: (store (s32) into @g1, !tbaa !13)
- CMP32mi8 $rip, 1, $noreg, @q2, $noreg, 6, implicit-def $eflags, debug-location !26 :: (dereferenceable load (s32) from @q2, !tbaa !13)
+ CMP32mi $rip, 1, $noreg, @q2, $noreg, 6, implicit-def $eflags, debug-location !26 :: (dereferenceable load (s32) from @q2, !tbaa !13)
JCC_1 %bb.4, 12, implicit killed $eflags, debug-location !25
JMP_1 %bb.3, debug-location !25
diff --git a/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-post-regalloc.mir b/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-post-regalloc.mir
index b1aae60492e8..355530bc55ab 100644
--- a/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-post-regalloc.mir
+++ b/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-post-regalloc.mir
@@ -86,7 +86,7 @@ body: |
%0 = PHI %2, %bb.0.entry, %1, %bb.1.while.body
DBG_VALUE %0, _, !17, !DIExpression(), debug-location !18
- %1 = ADD64ri8 %0, 4, implicit-def dead $eflags, debug-location !20
+ %1 = ADD64ri32 %0, 4, implicit-def dead $eflags, debug-location !20
DBG_VALUE %1, _, !17, !DIExpression(), debug-location !18
%3 = MOV32rm %0, 1, _, 0, _, debug-location !21 :: (load (s32) from %ir.p.addr.0)
%4 = MOV64rm $rip, 1, _, target-flags(x86-gotpcrel) @x, _, debug-location !22 :: (load (s64) from got)
diff --git a/llvm/test/DebugInfo/X86/debug-loc-asan.mir b/llvm/test/DebugInfo/X86/debug-loc-asan.mir
index 9dd5fc5fa6cd..6f90c94e9947 100644
--- a/llvm/test/DebugInfo/X86/debug-loc-asan.mir
+++ b/llvm/test/DebugInfo/X86/debug-loc-asan.mir
@@ -221,7 +221,7 @@ body: |
$rsp = frame-setup AND64ri8 $rsp, -32, implicit-def dead $eflags
$rsp = frame-setup SUB64ri32 $rsp, 160, implicit-def dead $eflags
renamable $rax = LEA64r $rsp, 1, $noreg, 64, $noreg
- CMP32mi8 $noreg, 1, $noreg, @__asan_option_detect_stack_use_after_return, $noreg, 0, implicit-def $eflags :: (load (s32) from @__asan_option_detect_stack_use_after_return)
+ CMP32mi $noreg, 1, $noreg, @__asan_option_detect_stack_use_after_return, $noreg, 0, implicit-def $eflags :: (load (s32) from @__asan_option_detect_stack_use_after_return)
$rcx = MOV64rr $rax
MOV32mr $rsp, 1, $noreg, 60, $noreg, killed $edi :: (store (s32) into %stack.1)
MOV64mr $rsp, 1, $noreg, 48, $noreg, killed $rax :: (store (s64) into %stack.2)
diff --git a/llvm/test/DebugInfo/X86/debug-loc-offset.mir b/llvm/test/DebugInfo/X86/debug-loc-offset.mir
index 95f524fc162c..e453788a6f8a 100644
--- a/llvm/test/DebugInfo/X86/debug-loc-offset.mir
+++ b/llvm/test/DebugInfo/X86/debug-loc-offset.mir
@@ -240,7 +240,7 @@ body: |
renamable $eax = MOV32rm $ebp, 1, $noreg, 8, $noreg :: (load (s32) from %fixed-stack.1)
DBG_VALUE renamable $eax, 0, !20, !DIExpression(DW_OP_deref), debug-location !21
MOV32mi $ebp, 1, $noreg, -4, $noreg, 2, debug-location !23 :: (store (s32) into %ir.z)
- CMP32mi8 renamable $eax, 1, $noreg, 4, $noreg, 2, implicit-def $eflags, debug-location !24 :: (load (s32) from %ir.var)
+ CMP32mi renamable $eax, 1, $noreg, 4, $noreg, 2, implicit-def $eflags, debug-location !24 :: (load (s32) from %ir.var)
MOV32mr $ebp, 1, $noreg, -8, $noreg, killed $eax :: (store (s32) into %stack.1)
DBG_VALUE $ebp, 0, !20, !DIExpression(DW_OP_constu, 8, DW_OP_minus, DW_OP_deref, DW_OP_deref), debug-location !21
JCC_1 %bb.2, 14, implicit $eflags, debug-location !24
diff --git a/llvm/test/DebugInfo/X86/location-range.mir b/llvm/test/DebugInfo/X86/location-range.mir
index 6b4a44b3f6ed..2682d1d997a1 100644
--- a/llvm/test/DebugInfo/X86/location-range.mir
+++ b/llvm/test/DebugInfo/X86/location-range.mir
@@ -128,7 +128,7 @@ body: |
bb.0.entry:
successors: %bb.1(0x50000000), %bb.2(0x30000000)
- CMP64mi8 $rip, 1, $noreg, @c, $noreg, 0, implicit-def $eflags, debug-location !21 :: (dereferenceable load (s64) from @c)
+ CMP64mi32 $rip, 1, $noreg, @c, $noreg, 0, implicit-def $eflags, debug-location !21 :: (dereferenceable load (s64) from @c)
JCC_1 %bb.2, 4, implicit killed $eflags, debug-location !27
bb.1.if.then:
diff --git a/llvm/test/DebugInfo/X86/machinecse-wrongdebug-hoist.ll b/llvm/test/DebugInfo/X86/machinecse-wrongdebug-hoist.ll
index c9f32f837958..4660315040b3 100644
--- a/llvm/test/DebugInfo/X86/machinecse-wrongdebug-hoist.ll
+++ b/llvm/test/DebugInfo/X86/machinecse-wrongdebug-hoist.ll
@@ -1,6 +1,6 @@
; RUN: llc %s -o - -print-after=machine-cse -mtriple=x86_64-- 2>&1 | FileCheck %s --match-full-lines
-; CHECK: %5:gr32 = SUB32ri8 %0:gr32(tied-def 0), 1, implicit-def $eflags, debug-location !24; a.c:3:13
+; CHECK: %5:gr32 = SUB32ri %0:gr32(tied-def 0), 1, implicit-def $eflags, debug-location !24; a.c:3:13
; CHECK-NEXT: %10:gr32 = MOVSX32rr8 %4:gr8
; CHECK-NEXT: JCC_1 %bb.2, 15, implicit $eflags, debug-location !25; a.c:3:18
diff --git a/llvm/test/DebugInfo/X86/sdag-dbgvalue-ssareg.ll b/llvm/test/DebugInfo/X86/sdag-dbgvalue-ssareg.ll
index 7de231300af4..6656bf2409e8 100644
--- a/llvm/test/DebugInfo/X86/sdag-dbgvalue-ssareg.ll
+++ b/llvm/test/DebugInfo/X86/sdag-dbgvalue-ssareg.ll
@@ -19,7 +19,7 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local i32 @main(i32 %arg0, i32 %arg1) local_unnamed_addr !dbg !11 {
entry:
; CHECK-LABEL: bb.0.entry:
-; INSTRREF: ADD32ri8 {{.*}} debug-instr-number 1
+; INSTRREF: ADD32ri {{.*}} debug-instr-number 1
%0 = add i32 %arg0, 42, !dbg !26
%1 = add i32 %arg1, 101, !dbg !26
%cmp = icmp eq i32 %1, 0
diff --git a/llvm/test/DebugInfo/assignment-tracking/X86/lower-to-value.ll b/llvm/test/DebugInfo/assignment-tracking/X86/lower-to-value.ll
index 88ff9deaaaa7..859056aff633 100644
--- a/llvm/test/DebugInfo/assignment-tracking/X86/lower-to-value.ll
+++ b/llvm/test/DebugInfo/assignment-tracking/X86/lower-to-value.ll
@@ -42,9 +42,9 @@
;; The final assignment (X.B += 2) doesn't get stored back to the alloca. This
;; means that that the stack location isn't valid for the entire lifetime of X.
-; DBGVALUE: %2:gr64 = nsw ADD64ri8 %1, 2, implicit-def dead $eflags, debug-location
+; DBGVALUE: %2:gr64 = nsw ADD64ri32 %1, 2, implicit-def dead $eflags, debug-location
; DBGVALUE-NEXT: DBG_VALUE %2, $noreg, ![[VAR]], !DIExpression(DW_OP_LLVM_fragment, 64, 64), debug-location
-; INSTRREF: %2:gr64 = nsw ADD64ri8 %1, 2, implicit-def dead $eflags, debug-instr-number 1
+; INSTRREF: %2:gr64 = nsw ADD64ri32 %1, 2, implicit-def dead $eflags, debug-instr-number 1
; INSTRREF-NEXT: DBG_INSTR_REF ![[VAR]], !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_fragment, 64, 64), dbg-instr-ref(1, 0), debug-location
;; Bits [0, 64) are still stack homed. FIXME, this particular reinstatement is
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 8a1a14c2f001..1cb6b51c79ea 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -1,12 +1,9 @@
static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
- {X86::ADD16ri8_DB, X86::ADD16mi8, TB_NO_REVERSE},
{X86::ADD16ri_DB, X86::ADD16mi, TB_NO_REVERSE},
{X86::ADD16rr_DB, X86::ADD16mr, TB_NO_REVERSE},
- {X86::ADD32ri8_DB, X86::ADD32mi8, TB_NO_REVERSE},
{X86::ADD32ri_DB, X86::ADD32mi, TB_NO_REVERSE},
{X86::ADD32rr_DB, X86::ADD32mr, TB_NO_REVERSE},
{X86::ADD64ri32_DB, X86::ADD64mi32, TB_NO_REVERSE},
- {X86::ADD64ri8_DB, X86::ADD64mi8, TB_NO_REVERSE},
{X86::ADD64rr_DB, X86::ADD64mr, TB_NO_REVERSE},
{X86::ADD8ri_DB, X86::ADD8mi, TB_NO_REVERSE},
{X86::ADD8rr_DB, X86::ADD8mr, TB_NO_REVERSE},
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-twoaddr.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-twoaddr.ll
index d765f3416083..4a3e21c6d4e1 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-twoaddr.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-twoaddr.ll
@@ -26,7 +26,7 @@ for.body14: ; preds = %for.body14, %for.bo
call void @llvm.pseudoprobe(i64 -6878943695821059507, i64 9, i32 0, i64 -1)
;; Check an opeq form of instruction is created.
; CHECK: %[[#REG:]]:gr64_nosp = COPY killed %[[#]]
-; CHECK: %[[#REG]]:gr64_nosp = nuw ADD64ri8 %[[#REG]], 4, implicit-def dead $eflags
+; CHECK: %[[#REG]]:gr64_nosp = nuw ADD64ri32 %[[#REG]], 4, implicit-def dead $eflags
%niter137.nsub.3 = add i64 %niter137, -4
%niter137.ncmp.3 = icmp eq i64 %niter137.nsub.3, 0
br i1 %niter137.ncmp.3, label %for.cond25.preheader.loopexit.unr-lcssa, label %for.body14
diff --git a/llvm/utils/TableGen/X86ManualFoldTables.def b/llvm/utils/TableGen/X86ManualFoldTables.def
index 8f1897a5cde9..d949830b0988 100644
--- a/llvm/utils/TableGen/X86ManualFoldTables.def
+++ b/llvm/utils/TableGen/X86ManualFoldTables.def
@@ -233,13 +233,10 @@ NOFOLD(INSERTPSrr)
// The following entries are added manually b/c the encodings of reg form does not match the
// encoding of memory form
ENTRY(ADD16ri_DB, ADD16mi, TB_NO_REVERSE)
-ENTRY(ADD16ri8_DB, ADD16mi8, TB_NO_REVERSE)
ENTRY(ADD16rr_DB, ADD16mr, TB_NO_REVERSE)
ENTRY(ADD32ri_DB, ADD32mi, TB_NO_REVERSE)
-ENTRY(ADD32ri8_DB, ADD32mi8, TB_NO_REVERSE)
ENTRY(ADD32rr_DB, ADD32mr, TB_NO_REVERSE)
ENTRY(ADD64ri32_DB, ADD64mi32, TB_NO_REVERSE)
-ENTRY(ADD64ri8_DB, ADD64mi8, TB_NO_REVERSE)
ENTRY(ADD64rr_DB, ADD64mr, TB_NO_REVERSE)
ENTRY(ADD8ri_DB, ADD8mi, TB_NO_REVERSE)
ENTRY(ADD8rr_DB, ADD8mr, TB_NO_REVERSE)
More information about the llvm-commits
mailing list