[llvm] 0b7f6cc - GlobalISel: Add generic instructions for memory intrinsics
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 26 17:08:52 PDT 2020
Author: Matt Arsenault
Date: 2020-08-26T20:08:45-04:00
New Revision: 0b7f6cc71a72a85f8a0cbee836a7a8e31876951a
URL: https://github.com/llvm/llvm-project/commit/0b7f6cc71a72a85f8a0cbee836a7a8e31876951a
DIFF: https://github.com/llvm/llvm-project/commit/0b7f6cc71a72a85f8a0cbee836a7a8e31876951a.diff
LOG: GlobalISel: Add generic instructions for memory intrinsics
AArch64, X86 and Mips currently directly consume these and custom
lower them to produce a libcall, but really these should follow the
normal legalization process through the libcall/lower action.
Added:
llvm/test/MachineVerifier/test_g_memcpy.mir
llvm/test/MachineVerifier/test_g_memset.mir
Modified:
llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
llvm/include/llvm/Support/TargetOpcodes.def
llvm/include/llvm/Target/GenericOpcodes.td
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
llvm/lib/CodeGen/MachineVerifier.cpp
llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
llvm/lib/Target/X86/X86LegalizerInfo.cpp
llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir
llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
Removed:
llvm/test/MachineVerifier/test_memccpy_intrinsics.mir
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index ce16eee45fd2..033d5b4b5834 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -217,7 +217,7 @@ class IRTranslator : public MachineFunctionPass {
/// Translate an LLVM string intrinsic (memcpy, memset, ...).
bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
- Intrinsic::ID ID);
+ unsigned Opcode);
void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index ce1f92aca9bb..db36fc42aa2a 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -697,10 +697,19 @@ HANDLE_TARGET_OPCODE(G_READ_REGISTER)
/// write_register intrinsic
HANDLE_TARGET_OPCODE(G_WRITE_REGISTER)
+/// llvm.memcpy intrinsic
+HANDLE_TARGET_OPCODE(G_MEMCPY)
+
+/// llvm.memmove intrinsic
+HANDLE_TARGET_OPCODE(G_MEMMOVE)
+
+/// llvm.memset intrinsic
+HANDLE_TARGET_OPCODE(G_MEMSET)
+
/// Marker for the end of the generic opcode.
/// This is used to check if an opcode is in the range of the
/// generic opcodes.
-HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPCODE_END, G_WRITE_REGISTER)
+HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPCODE_END, G_MEMSET)
/// BUILTIN_OP_END - This must be the last enum value in this list.
/// The target-specific post-isel opcode values start here.
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 87f0e4b61d31..a33b7f4a1396 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1272,3 +1272,30 @@ def G_STRICT_FDIV : ConstrainedIntruction<G_FDIV>;
def G_STRICT_FREM : ConstrainedIntruction<G_FREM>;
def G_STRICT_FMA : ConstrainedIntruction<G_FMA>;
def G_STRICT_FSQRT : ConstrainedIntruction<G_FSQRT>;
+
+//------------------------------------------------------------------------------
+// Memory intrinsics
+//------------------------------------------------------------------------------
+
+def G_MEMCPY : GenericInstruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall);
+ let hasSideEffects = 0;
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+def G_MEMMOVE : GenericInstruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall);
+ let hasSideEffects = 0;
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+def G_MEMSET : GenericInstruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins ptype0:$dst_addr, type1:$value, type2:$size, untyped_imm_0:$tailcall);
+ let hasSideEffects = 0;
+ let mayStore = 1;
+}
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 93e78fe142d3..232f6d672c48 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1385,13 +1385,11 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
}
bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
+ const unsigned Opc = MI.getOpcode();
// This combine is fairly complex so it's not written with a separate
// matcher function.
- assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
- Intrinsic::ID ID = (Intrinsic::ID)MI.getIntrinsicID();
- assert((ID == Intrinsic::memcpy || ID == Intrinsic::memmove ||
- ID == Intrinsic::memset) &&
- "Expected a memcpy like intrinsic");
+ assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE ||
+ Opc == TargetOpcode::G_MEMSET) && "Expected memcpy like instruction");
auto MMOIt = MI.memoperands_begin();
const MachineMemOperand *MemOp = *MMOIt;
@@ -1402,11 +1400,11 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
Align DstAlign = MemOp->getBaseAlign();
Align SrcAlign;
- Register Dst = MI.getOperand(1).getReg();
- Register Src = MI.getOperand(2).getReg();
- Register Len = MI.getOperand(3).getReg();
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ Register Len = MI.getOperand(2).getReg();
- if (ID != Intrinsic::memset) {
+ if (Opc != TargetOpcode::G_MEMSET) {
assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
MemOp = *(++MMOIt);
SrcAlign = MemOp->getBaseAlign();
@@ -1426,11 +1424,11 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
if (MaxLen && KnownLen > MaxLen)
return false;
- if (ID == Intrinsic::memcpy)
+ if (Opc == TargetOpcode::G_MEMCPY)
return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
- if (ID == Intrinsic::memmove)
+ if (Opc == TargetOpcode::G_MEMMOVE)
return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
- if (ID == Intrinsic::memset)
+ if (Opc == TargetOpcode::G_MEMSET)
return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
return false;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 46041f0a8a82..d1d4bb1da03f 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1283,14 +1283,13 @@ bool IRTranslator::translateGetElementPtr(const User &U,
bool IRTranslator::translateMemFunc(const CallInst &CI,
MachineIRBuilder &MIRBuilder,
- Intrinsic::ID ID) {
+ unsigned Opcode) {
// If the source is undef, then just emit a nop.
if (isa<UndefValue>(CI.getArgOperand(1)))
return true;
- ArrayRef<Register> Res;
- auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
+ auto ICall = MIRBuilder.buildInstr(Opcode);
for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
ICall.addUse(getOrCreateVReg(**AI));
@@ -1321,7 +1320,7 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
ICall.addMemOperand(MF->getMachineMemOperand(
MachinePointerInfo(CI.getArgOperand(0)),
MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
- if (ID != Intrinsic::memset)
+ if (Opcode != TargetOpcode::G_MEMSET)
ICall.addMemOperand(MF->getMachineMemOperand(
MachinePointerInfo(CI.getArgOperand(1)),
MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
@@ -1713,9 +1712,11 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
MachineInstr::copyFlagsFromInstruction(CI));
return true;
case Intrinsic::memcpy:
+ return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
case Intrinsic::memmove:
+ return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
case Intrinsic::memset:
- return translateMemFunc(CI, MIRBuilder, ID);
+ return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
case Intrinsic::eh_typeid_for: {
GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
Register Reg = getOrCreateVReg(CI);
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index c6720a19c9d4..347fe7b0ee98 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -558,12 +558,11 @@ simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
LegalizerHelper::LegalizeResult
llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
MachineInstr &MI) {
- assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
SmallVector<CallLowering::ArgInfo, 3> Args;
// Add all the args, except for the last which is an imm denoting 'tail'.
- for (unsigned i = 1; i < MI.getNumOperands() - 1; i++) {
+ for (unsigned i = 0; i < MI.getNumOperands() - 1; ++i) {
Register Reg = MI.getOperand(i).getReg();
// Need derive an IR type for call lowering.
@@ -578,30 +577,27 @@ llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
- Intrinsic::ID ID = MI.getOperand(0).getIntrinsicID();
RTLIB::Libcall RTLibcall;
- switch (ID) {
- case Intrinsic::memcpy:
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_MEMCPY:
RTLibcall = RTLIB::MEMCPY;
break;
- case Intrinsic::memset:
- RTLibcall = RTLIB::MEMSET;
- break;
- case Intrinsic::memmove:
+ case TargetOpcode::G_MEMMOVE:
RTLibcall = RTLIB::MEMMOVE;
break;
+ case TargetOpcode::G_MEMSET:
+ RTLibcall = RTLIB::MEMSET;
+ break;
default:
return LegalizerHelper::UnableToLegalize;
}
const char *Name = TLI.getLibcallName(RTLibcall);
- MIRBuilder.setInstrAndDebugLoc(MI);
-
CallLowering::CallLoweringInfo Info;
Info.CallConv = TLI.getLibcallCallingConv(RTLibcall);
Info.Callee = MachineOperand::CreateES(Name);
Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx));
- Info.IsTailCall = MI.getOperand(MI.getNumOperands() - 1).getImm() == 1 &&
+ Info.IsTailCall = MI.getOperand(MI.getNumOperands() - 1).getImm() &&
isLibCallInTailPosition(MIRBuilder.getTII(), MI);
std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
@@ -748,6 +744,13 @@ LegalizerHelper::libcall(MachineInstr &MI) {
return Status;
break;
}
+ case TargetOpcode::G_MEMCPY:
+ case TargetOpcode::G_MEMMOVE:
+ case TargetOpcode::G_MEMSET: {
+ LegalizeResult Result = createMemLibcall(MIRBuilder, *MIRBuilder.getMRI(), MI);
+ MI.eraseFromParent();
+ return Result;
+ }
}
MI.eraseFromParent();
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 26c2914b77ec..315d313d0208 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1357,20 +1357,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
break;
}
}
- switch (IntrID) {
- case Intrinsic::memcpy:
- if (MI->getNumOperands() != 5)
- report("Expected memcpy intrinsic to have 5 operands", MI);
- break;
- case Intrinsic::memmove:
- if (MI->getNumOperands() != 5)
- report("Expected memmove intrinsic to have 5 operands", MI);
- break;
- case Intrinsic::memset:
- if (MI->getNumOperands() != 5)
- report("Expected memset intrinsic to have 5 operands", MI);
- break;
- }
+
break;
}
case TargetOpcode::G_SEXT_INREG: {
@@ -1448,6 +1435,61 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
}
break;
}
+ case TargetOpcode::G_MEMCPY:
+ case TargetOpcode::G_MEMMOVE: {
+ ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
+ if (MMOs.size() != 2) {
+ report("memcpy/memmove must have 2 memory operands", MI);
+ break;
+ }
+
+ if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
+ (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
+ report("wrong memory operand types", MI);
+ break;
+ }
+
+ if (MMOs[0]->getSize() != MMOs[1]->getSize())
+ report("inconsistent memory operand sizes", MI);
+
+ LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
+ LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
+
+ if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
+ report("memory instruction operand must be a pointer", MI);
+ break;
+ }
+
+ if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
+ report("inconsistent store address space", MI);
+ if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
+ report("inconsistent load address space", MI);
+
+ break;
+ }
+ case TargetOpcode::G_MEMSET: {
+ ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
+ if (MMOs.size() != 1) {
+ report("memset must have 1 memory operand", MI);
+ break;
+ }
+
+ if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
+ report("memset memory operand must be a store", MI);
+ break;
+ }
+
+ LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
+ if (!DstPtrTy.isPointer()) {
+ report("memset operand must be a pointer", MI);
+ break;
+ }
+
+ if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
+ report("inconsistent memset address space", MI);
+
+ break;
+ }
default:
break;
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 063c451440dc..77e5f374c1af 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -622,6 +622,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower();
+ getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();
+
computeTables();
verify(*ST.getInstrInfo());
}
@@ -706,19 +708,6 @@ bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(MachineInstr &MI,
bool AArch64LegalizerInfo::legalizeIntrinsic(
LegalizerHelper &Helper, MachineInstr &MI) const {
- MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
- switch (MI.getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memset:
- case Intrinsic::memmove:
- if (createMemLibcall(MIRBuilder, *MIRBuilder.getMRI(), MI) ==
- LegalizerHelper::UnableToLegalize)
- return false;
- MI.eraseFromParent();
- return true;
- default:
- break;
- }
return true;
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 9a1f200d5222..595e12237747 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -96,24 +96,6 @@ bool AArch64PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
CombinerHelper Helper(Observer, B, KB, MDT);
AArch64GenPreLegalizerCombinerHelper Generated(GeneratedRuleCfg, Helper);
- switch (MI.getOpcode()) {
- case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
- switch (MI.getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memmove:
- case Intrinsic::memset: {
- // If we're at -O0 set a maxlen of 32 to inline, otherwise let the other
- // heuristics decide.
- unsigned MaxLen = EnableOpt ? 0 : 32;
- // Try to inline memcpy type calls if optimizations are enabled.
- return (!EnableMinSize) ? Helper.tryCombineMemCpyFamily(MI, MaxLen)
- : false;
- }
- default:
- break;
- }
- }
-
if (Generated.tryCombineAll(Observer, MI, B))
return true;
@@ -122,6 +104,15 @@ bool AArch64PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
return Helper.tryCombineConcatVectors(MI);
case TargetOpcode::G_SHUFFLE_VECTOR:
return Helper.tryCombineShuffleVector(MI);
+ case TargetOpcode::G_MEMCPY:
+ case TargetOpcode::G_MEMMOVE:
+ case TargetOpcode::G_MEMSET: {
+ // If we're at -O0 set a maxlen of 32 to inline, otherwise let the other
+ // heuristics decide.
+ unsigned MaxLen = EnableOpt ? 0 : 32;
+ // Try to inline memcpy type calls if optimizations are enabled.
+ return !EnableMinSize ? Helper.tryCombineMemCpyFamily(MI, MaxLen) : false;
+ }
}
return false;
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index b489c8137769..2692c08b93de 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -322,6 +322,8 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
getActionDefinitionsBuilder(G_SEXT_INREG).lower();
+ getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();
+
computeTables();
verify(*ST.getInstrInfo());
}
@@ -500,7 +502,6 @@ static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
- MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
const MipsSubtarget &ST =
static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
const MipsInstrInfo &TII = *ST.getInstrInfo();
@@ -508,14 +509,6 @@ bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
const RegisterBankInfo &RBI = *ST.getRegBankInfo();
switch (MI.getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memset:
- case Intrinsic::memmove:
- if (createMemLibcall(MIRBuilder, MRI, MI) ==
- LegalizerHelper::UnableToLegalize)
- return false;
- MI.eraseFromParent();
- return true;
case Intrinsic::trap: {
MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
MI.eraseFromParent();
diff --git a/llvm/lib/Target/X86/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/X86LegalizerInfo.cpp
index 9ee0bdc3430d..1b371ac2a108 100644
--- a/llvm/lib/Target/X86/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/X86LegalizerInfo.cpp
@@ -86,25 +86,14 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
setLegalizeScalarToDifferentSizeStrategy(
G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);
+ getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();
+
computeTables();
verify(*STI.getInstrInfo());
}
bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
- MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
- switch (MI.getIntrinsicID()) {
- case Intrinsic::memcpy:
- case Intrinsic::memset:
- case Intrinsic::memmove:
- if (createMemLibcall(MIRBuilder, *MIRBuilder.getMRI(), MI) ==
- LegalizerHelper::UnableToLegalize)
- return false;
- MI.eraseFromParent();
- return true;
- default:
- break;
- }
return true;
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index a896b05512dd..946e12c9e5ab 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1138,7 +1138,7 @@ define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
+; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
@@ -1148,7 +1148,7 @@ define void @test_memcpy_tail(i8* %dst, i8* %src, i64 %size) {
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 1 :: (store 1 into %ir.dst), (load 1 from %ir.src)
+; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 1 :: (store 1 into %ir.dst), (load 1 from %ir.src)
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
@@ -1159,7 +1159,7 @@ define void @test_memcpy_nonzero_as(i8 addrspace(1)* %dst, i8 addrspace(1) * %sr
; CHECK: [[DST:%[0-9]+]]:_(p1) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p1) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p1), [[SRC]](p1), [[SIZE]](s64), 0 :: (store 1 into %ir.dst, addrspace 1), (load 1 from %ir.src, addrspace 1)
+; CHECK: G_MEMCPY [[DST]](p1), [[SRC]](p1), [[SIZE]](s64), 0 :: (store 1 into %ir.dst, addrspace 1), (load 1 from %ir.src, addrspace 1)
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size, i1 0)
ret void
}
@@ -1170,7 +1170,7 @@ define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
+; CHECK: G_MEMMOVE [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
@@ -1182,7 +1182,7 @@ define void @test_memset(i8* %dst, i8 %val, i64 %size) {
; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[DST]](p0), [[SRC]](s8), [[SIZE]](s64), 0 :: (store 1 into %ir.dst)
+; CHECK: G_MEMSET [[DST]](p0), [[SRC]](s8), [[SIZE]](s64), 0 :: (store 1 into %ir.dst)
call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
index ed39bd46de34..0eeed417cb99 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
@@ -70,12 +70,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
- ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = COPY $x2
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -122,7 +122,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 72
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -169,7 +169,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 72
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -191,12 +191,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72
- ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 72
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -263,7 +263,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 143
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
index 39384188bb07..c4444731fbc7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
@@ -55,12 +55,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
- ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ ; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = COPY $x2
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -94,7 +94,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 48
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -111,12 +111,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
- ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ ; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 96
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -156,7 +156,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 52
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
index a736f57c4383..cea0af2ff0af 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
@@ -67,14 +67,14 @@ body: |
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
- ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store 1 into %ir.dst)
+ ; CHECK: G_MEMSET [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store 1 into %ir.dst)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s32) = COPY $w2
%3:_(s8) = G_TRUNC %1(s32)
%4:_(s64) = G_ZEXT %2(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64), 1 :: (store 1 into %ir.dst)
+ G_MEMSET %0(p0), %3(s8), %4(s64), 1 :: (store 1 into %ir.dst)
RET_ReallyLR
...
@@ -103,7 +103,7 @@ body: |
%1:_(s32) = COPY $w1
%3:_(s64) = G_CONSTANT i64 16
%2:_(s8) = G_TRUNC %1(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
+ G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
RET_ReallyLR
...
@@ -135,7 +135,7 @@ body: |
%1:_(s32) = G_CONSTANT i32 0
%3:_(s64) = G_CONSTANT i64 64
%2:_(s8) = G_TRUNC %1(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
+ G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
RET_ReallyLR
...
@@ -160,7 +160,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(s8) = G_CONSTANT i8 64
%2:_(s64) = G_CONSTANT i64 16
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
+ G_MEMSET %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
RET_ReallyLR
...
@@ -196,7 +196,7 @@ body: |
%1:_(s32) = COPY $w1
%3:_(s64) = G_CONSTANT i64 60
%2:_(s8) = G_TRUNC %1(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
+ G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
RET_ReallyLR
...
@@ -224,7 +224,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(s8) = G_CONSTANT i8 64
%2:_(s64) = G_CONSTANT i64 18
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
+ G_MEMSET %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
RET_ReallyLR
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
index fb4444124ad9..cf74772a125e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
@@ -53,7 +53,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 32
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
@@ -75,12 +75,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 36
- ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 36
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
RET_ReallyLR
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir
index 8bc5bcf11836..91f7d019eb24 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir
@@ -25,7 +25,7 @@ body: |
%1:_(p0) = COPY $x1
%2:_(s32) = COPY $w2
%3:_(s64) = G_ZEXT %2(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 0
+ G_MEMCPY %0(p0), %1(p0), %3(s64), 0 :: (store unknown-size), (load unknown-size)
RET_ReallyLR
...
@@ -50,7 +50,7 @@ body: |
%1:_(p0) = COPY $x1
%2:_(s32) = COPY $w2
%3:_(s64) = G_ZEXT %2(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 1
+ G_MEMCPY %0(p0), %1(p0), %3(s64), 1 :: (store unknown-size), (load unknown-size)
RET_ReallyLR
...
@@ -78,7 +78,7 @@ body: |
%1:_(p0) = COPY $x1
%2:_(s32) = COPY $w2
%3:_(s64) = G_ZEXT %2(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %3(s64), 0
+ G_MEMMOVE %0(p0), %1(p0), %3(s64), 0 :: (store unknown-size), (load unknown-size)
RET_ReallyLR
...
@@ -108,7 +108,7 @@ body: |
%2:_(s32) = COPY $w2
%3:_(s8) = G_TRUNC %1(s32)
%4:_(s64) = G_ZEXT %2(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64), 0
+ G_MEMSET %0(p0), %3(s8), %4(s64), 0 :: (store unknown-size)
RET_ReallyLR
...
@@ -137,7 +137,7 @@ body: |
%1:_(p0) = COPY $x1
%2:_(s32) = COPY $w2
%3:_(s64) = G_ZEXT %2(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 1
+ G_MEMCPY %0(p0), %1(p0), %3(s64), 1 :: (store unknown-size), (load unknown-size)
$x0 = COPY %3
RET_ReallyLR implicit $x0
@@ -166,5 +166,5 @@ body: |
%2:_(s32) = COPY $w2
%4:_(s1) = G_CONSTANT i1 false
%3:_(s64) = G_ZEXT %2(s32)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 1
+ G_MEMCPY %0(p0), %1(p0), %3(s64), 1 :: (store unknown-size), (load unknown-size)
TCRETURNdi &memset, 0, csr_aarch64_aapcs, implicit $sp
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
index 204694e84953..26c9f579b821 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
@@ -51,7 +51,7 @@ body: |
%1:_(p0) = COPY $x1, debug-location !DILocation(line: 3, column: 1, scope: !11)
%2:_(s32) = COPY $w2, debug-location !DILocation(line: 4, column: 1, scope: !11)
%3:_(s64) = G_ZEXT %2(s32), debug-location !DILocation(line: 5, column: 1, scope: !11)
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 1, debug-location !DILocation(line: 6, column: 1, scope: !11)
+ G_MEMCPY %0(p0), %1(p0), %3(s64), 1, debug-location !DILocation(line: 6, column: 1, scope: !11) :: (store unknown-size), (load unknown-size)
DBG_VALUE 0, $noreg, !13, !DIExpression(), debug-location !DILocation(line: 6, column: 1, scope: !11)
DBG_VALUE 0, $noreg, !13, !DIExpression(), debug-location !DILocation(line: 6, column: 1, scope: !11)
RET_ReallyLR debug-location !DILocation(line: 7, column: 1, scope: !11)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
index 6a5df883acd0..e26b037cbefe 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
@@ -54,7 +54,7 @@ body: |
%2:_(s32) = COPY $w2
%3:_(s64) = G_ZEXT %2(s32), debug-location !11
%4:_(s8) = G_TRUNC %1(s32), debug-location !11
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %4(s8), %3(s64), 0, debug-location !11 :: (store 1 into %ir.ptr)
+ G_MEMSET %0(p0), %4(s8), %3(s64), 0, debug-location !11 :: (store 1 into %ir.ptr)
RET_ReallyLR debug-location !12
...
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
index 4cc956c2040c..67265c95b64b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
@@ -153,7 +153,7 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s
; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
; MIPS32: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
- ; MIPS32: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store 1 into %ir.dest), (load 1 from %ir.src)
+ ; MIPS32: G_MEMCPY [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store 1 into %ir.dest), (load 1 from %ir.src)
; MIPS32: RetRA
; MIPS32_PIC-LABEL: name: call_symbol
; MIPS32_PIC: bb.1.entry:
@@ -161,7 +161,7 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s
; MIPS32_PIC: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
; MIPS32_PIC: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
; MIPS32_PIC: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
- ; MIPS32_PIC: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store 1 into %ir.dest), (load 1 from %ir.src)
+ ; MIPS32_PIC: G_MEMCPY [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store 1 into %ir.dest), (load 1 from %ir.src)
; MIPS32_PIC: RetRA
entry:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %length, i1 false)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
index 773933018ed3..7f9f561c4b41 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
@@ -73,7 +73,7 @@ body: |
%8:_(s32) = G_CONSTANT i32 -8
%9:_(s32) = G_AND %7, %8
%10:_(p0) = G_DYN_STACKALLOC %9(s32), 0
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %10(p0), %0(s8), %1(s32), 0 :: (store 1 into %ir.vla)
+ G_MEMSET %10(p0), %0(s8), %1(s32), 0 :: (store 1 into %ir.vla)
%11:_(p0) = G_PTR_ADD %10, %1(s32)
%12:_(p0) = COPY %11(p0)
G_STORE %13(s8), %12(p0) :: (store 1 into %ir.arrayidx)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
index 1d5e55c282c5..b33daf570d45 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
@@ -19,7 +19,7 @@ define float @test_return_f1(float %f.coerce) {
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.f
; ALL: G_STORE [[TRUNC]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ ; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 4 from %ir.coerce.dive13)
; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s32)
; ALL: $xmm0 = COPY [[ANYEXT]](s128)
@@ -49,7 +49,7 @@ define double @test_return_d1(double %d.coerce) {
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
; ALL: G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.coerce.dive2)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 8), (load 1 from %ir.1, align 8)
+ ; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 8), (load 1 from %ir.1, align 8)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8 from %ir.coerce.dive13)
; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
; ALL: $xmm0 = COPY [[ANYEXT]](s128)
@@ -82,7 +82,7 @@ define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1)
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
; ALL: G_STORE [[TRUNC1]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.2)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.3, align 8), (load 1 from %ir.4, align 8)
+ ; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.3, align 8), (load 1 from %ir.4, align 8)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8 from %ir.5)
; ALL: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load 8 from %ir.5 + 8)
@@ -116,7 +116,7 @@ define i32 @test_return_i1(i32 %i.coerce) {
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL: G_STORE [[COPY]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+ ; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 4 from %ir.coerce.dive13)
; ALL: $eax = COPY [[LOAD]](s32)
; ALL: RET 0, implicit $eax
@@ -142,7 +142,7 @@ define i64 @test_return_i2(i64 %i.coerce) {
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.0, align 4)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.1, align 4), (load 1 from %ir.2, align 4)
+ ; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.1, align 4), (load 1 from %ir.2, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8 from %ir.3, align 4)
; ALL: $rax = COPY [[LOAD]](s64)
; ALL: RET 0, implicit $rax
@@ -174,9 +174,9 @@ define { i64, i32 } @test_return_i3(i64 %i.coerce0, i32 %i.coerce1) {
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX2]], [[C1]](s64)
; ALL: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store 4 into %ir.1)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64), 0 :: (store 1 into %ir.2, align 4), (load 1 from %ir.3, align 4)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.4, align 4), (load 1 from %ir.5, align 4)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64), 0 :: (store 1 into %ir.6, align 8), (load 1 from %ir.7, align 4)
+ ; ALL: G_MEMCPY [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64), 0 :: (store 1 into %ir.2, align 4), (load 1 from %ir.3, align 4)
+ ; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.4, align 4), (load 1 from %ir.5, align 4)
+ ; ALL: G_MEMCPY [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64), 0 :: (store 1 into %ir.6, align 8), (load 1 from %ir.7, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX3]](p0) :: (dereferenceable load 8 from %ir.tmp)
; ALL: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load 4 from %ir.tmp + 8, align 8)
@@ -218,7 +218,7 @@ define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
; ALL: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.2, align 4)
- ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.3, align 4), (load 1 from %ir.4, align 4)
+ ; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.3, align 4), (load 1 from %ir.4, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8 from %ir.5, align 4)
; ALL: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load 8 from %ir.5 + 8, align 4)
diff --git a/llvm/test/MachineVerifier/test_g_memcpy.mir b/llvm/test/MachineVerifier/test_g_memcpy.mir
new file mode 100644
index 000000000000..2f4ca2c7565b
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_memcpy.mir
@@ -0,0 +1,50 @@
+# RUN: not --crash llc -o - -march=arm64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+---
+name: test_memcpy
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ %0:_(p0) = G_CONSTANT i64 0
+ %1:_(p0) = G_CONSTANT i64 4
+ %2:_(s64) = G_CONSTANT i64 4
+
+ ; CHECK: *** Bad machine code: memcpy/memmove must have 2 memory operands ***
+ G_MEMCPY %0, %1, %2, 0
+
+ ; CHECK: *** Bad machine code: memcpy/memmove must have 2 memory operands ***
+ G_MEMCPY %0, %1, %2, 0 :: (load 4)
+
+ ; CHECK: *** Bad machine code: memcpy/memmove must have 2 memory operands ***
+ G_MEMCPY %0, %1, %2, 0 :: (store 4)
+
+ ; CHECK: *** Bad machine code: wrong memory operand types ***
+ G_MEMCPY %0, %1, %2, 0 :: (load 4), (store 4)
+
+ ; CHECK: *** Bad machine code: inconsistent memory operand sizes ***
+ G_MEMCPY %0, %1, %2, 0 :: (store 8), (load 4)
+
+ ; CHECK: *** Bad machine code: inconsistent memory operand sizes ***
+ G_MEMCPY %0, %1, %2, 0 :: (store unknown-size), (load 4)
+
+ ; CHECK: *** Bad machine code: inconsistent memory operand sizes ***
+ G_MEMCPY %0, %1, %2, 0 :: (store 8), (load unknown-size)
+
+ ; CHECK: *** Bad machine code: inconsistent store address space ***
+ G_MEMCPY %0, %1, %2, 0 :: (store 4, addrspace 1), (load 4)
+
+ ; CHECK: *** Bad machine code: inconsistent load address space ***
+ G_MEMCPY %0, %1, %2, 0 :: (store 4), (load 4, addrspace 1)
+
+ ; CHECK: *** Bad machine code: memory instruction operand must be a pointer ***
+ G_MEMCPY %2, %0, %2, 0 :: (store 4), (load 4)
+
+ ; CHECK: *** Bad machine code: memory instruction operand must be a pointer ***
+ G_MEMCPY %0, %2, %2, 0 :: (store 4), (load 4)
+
+...
diff --git a/llvm/test/MachineVerifier/test_g_memset.mir b/llvm/test/MachineVerifier/test_g_memset.mir
new file mode 100644
index 000000000000..66ba1845eb7b
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_memset.mir
@@ -0,0 +1,33 @@
+# RUN: not --crash llc -o - -march=arm64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+---
+name: test_memset
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ %0:_(p0) = G_CONSTANT i64 0
+ %1:_(s64) = G_CONSTANT i64 4
+ %2:_(s8) = G_CONSTANT i8 7
+
+ ; CHECK: *** Bad machine code: memset must have 1 memory operand ***
+ G_MEMSET %0, %1, %2, 0
+
+ ; CHECK: *** Bad machine code: memset memory operand must be a store ***
+ G_MEMSET %0, %1, %2, 0 :: (load 4)
+
+ ; CHECK: *** Bad machine code: Missing mayLoad flag ***
+ ; CHECK: *** Bad machine code: memset memory operand must be a store ***
+ G_MEMSET %0, %1, %2, 0 :: (load store 4)
+
+ ; CHECK: *** Bad machine code: inconsistent memset address space ***
+ G_MEMSET %0, %1, %2, 0 :: (store 4, addrspace 1)
+
+ ; CHECK: *** Bad machine code: memset operand must be a pointer ***
+ G_MEMSET %1, %1, %2, 0 :: (store 4)
+
+...
diff --git a/llvm/test/MachineVerifier/test_memccpy_intrinsics.mir b/llvm/test/MachineVerifier/test_memccpy_intrinsics.mir
deleted file mode 100644
index 03ba9e0d06f2..000000000000
--- a/llvm/test/MachineVerifier/test_memccpy_intrinsics.mir
+++ /dev/null
@@ -1,27 +0,0 @@
-# RUN: not --crash llc -o - -march=aarch64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
-# REQUIRES: aarch64-registered-target
-
----
-name: test_memcpy_et_al
-legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-liveins:
-body: |
- bb.0:
-
- %0:_(p0) = G_IMPLICIT_DEF
- %1:_(s64) = G_IMPLICIT_DEF
- %2:_(s1) = G_IMPLICIT_DEF
-
- ; CHECK: Bad machine code: Expected memcpy intrinsic to have 5 operands
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %0(p0), %1(s64)
-
- ; CHECK: Bad machine code: Expected memmove intrinsic to have 5 operands
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %0(p0), %1(s64)
-
- ; CHECK: Bad machine code: Expected memset intrinsic to have 5 operands
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %0(p0), %1(s64)
-
-...
More information about the llvm-commits
mailing list