[llvm] f95a5fb - MachineIRBuilder: Rename buildMerge. NFC
Diana Picus via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 13 00:45:41 PST 2023
Author: Diana Picus
Date: 2023-01-13T09:32:58+01:00
New Revision: f95a5fbe7ce1ddc890868a2b4720b4561af672cf
URL: https://github.com/llvm/llvm-project/commit/f95a5fbe7ce1ddc890868a2b4720b4561af672cf
DIFF: https://github.com/llvm/llvm-project/commit/f95a5fbe7ce1ddc890868a2b4720b4561af672cf.diff
LOG: MachineIRBuilder: Rename buildMerge. NFC
`buildMerge` may build a G_MERGE_VALUES, G_BUILD_VECTOR or
G_CONCAT_VECTORS. Rename it to `buildMergeLikeInstr`.
This is a follow-up suggested in https://reviews.llvm.org/D140964
Differential Revision: https://reviews.llvm.org/D141372
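For context, a minimal sketch of how the renamed helper picks its opcode from the destination and source types, mirroring the expectations updated in MachineIRBuilderTest.cpp further down; here B is assumed to be a MachineIRBuilder and Lo32/Hi32 are hypothetical s32 registers:

    // Scalar destination built from scalar pieces -> G_MERGE_VALUES.
    auto Wide = B.buildMergeLikeInstr(LLT::scalar(64), {Lo32, Hi32});

    // Vector destination built from scalar pieces -> G_BUILD_VECTOR.
    LLT V2S32 = LLT::fixed_vector(2, 32);
    auto Vec0 = B.buildMergeLikeInstr(V2S32, {Lo32, Hi32});
    auto Vec1 = B.buildMergeLikeInstr(V2S32, {Lo32, Hi32});

    // Vector destination built from vector pieces -> G_CONCAT_VECTORS.
    B.buildMergeLikeInstr(LLT::fixed_vector(4, 32),
                          {Vec0.getReg(0), Vec1.getReg(0)});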
Added:
Modified:
llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
llvm/lib/Target/ARM/ARMCallLowering.cpp
llvm/lib/Target/Mips/MipsCallLowering.cpp
llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
llvm/lib/Target/X86/X86CallLowering.cpp
llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 007a5935ecf89..16f63527456a3 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -300,7 +300,7 @@ class LegalizationArtifactCombiner {
for (unsigned i = 0; i < NumSrcs; ++i)
SrcRegs[i] = SrcMerge->getSourceReg(i);
- Builder.buildMerge(DstReg, SrcRegs);
+ Builder.buildMergeLikeInstr(DstReg, SrcRegs);
UpdatedDefs.push_back(DstReg);
} else {
// Unable to combine
@@ -932,7 +932,7 @@ class LegalizationArtifactCombiner {
}
MIB.setInstrAndDebugLoc(MI);
- MIB.buildMerge(Dst, ConcatSources);
+ MIB.buildMergeLikeInstr(Dst, ConcatSources);
DeadInsts.push_back(&MI);
return true;
}
@@ -1099,7 +1099,7 @@ class LegalizationArtifactCombiner {
Regs.push_back(MergeI->getOperand(Idx).getReg());
Register DefReg = MI.getReg(DefIdx);
- Builder.buildMerge(DefReg, Regs);
+ Builder.buildMergeLikeInstr(DefReg, Regs);
UpdatedDefs.push_back(DefReg);
}
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 327b6a1f577dd..08f240a6a5637 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -985,9 +985,10 @@ class MachineIRBuilder {
/// \return a MachineInstrBuilder for the newly created instruction. The
/// opcode of the new instruction will depend on the types of both
/// the destination and the sources.
- MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<Register> Ops);
- MachineInstrBuilder buildMerge(const DstOp &Res,
- std::initializer_list<SrcOp> Ops);
+ MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res,
+ ArrayRef<Register> Ops);
+ MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res,
+ std::initializer_list<SrcOp> Ops);
/// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
///
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 15d6cbcf6a7dd..d59dfc8269347 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -306,8 +306,8 @@ mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
Register UnmergeSrcReg;
if (LCMTy != PartLLT) {
assert(DstRegs.size() == 1);
- return B.buildDeleteTrailingVectorElements(DstRegs[0],
- B.buildMerge(LCMTy, SrcRegs));
+ return B.buildDeleteTrailingVectorElements(
+ DstRegs[0], B.buildMergeLikeInstr(LCMTy, SrcRegs));
} else {
// We don't need to widen anything if we're extracting a scalar which was
// promoted to a vector e.g. s8 -> v4s8 -> s8
@@ -388,9 +388,9 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
if (SrcSize == OrigTy.getSizeInBits())
- B.buildMerge(OrigRegs[0], Regs);
+ B.buildMergeLikeInstr(OrigRegs[0], Regs);
else {
- auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
+ auto Widened = B.buildMergeLikeInstr(LLT::scalar(SrcSize), Regs);
B.buildTrunc(OrigRegs[0], Widened);
}
@@ -458,7 +458,8 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
- auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
+ auto Merge =
+ B.buildMergeLikeInstr(RealDstEltTy, Regs.take_front(PartsPerElt));
// Fix the type in case this is really a vector of pointers.
MRI.setType(Merge.getReg(0), RealDstEltTy);
EltMerges.push_back(Merge.getReg(0));
@@ -549,7 +550,7 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
SmallVector<Register, 8> MergeParts(1, SrcReg);
for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
MergeParts.push_back(Undef);
- UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
+ UnmergeSrc = B.buildMergeLikeInstr(LCMTy, MergeParts).getReg(0);
}
}
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index f93d1d04dc3c0..af4bb1634746d 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -389,7 +389,7 @@ void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
if (Ops.size() == 1)
Builder.buildCopy(NewDstReg, Ops[0]);
else
- Builder.buildMerge(NewDstReg, Ops);
+ Builder.buildMergeLikeInstr(NewDstReg, Ops);
MI.eraseFromParent();
replaceRegWith(MRI, DstReg, NewDstReg);
@@ -1972,7 +1972,7 @@ void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
}
auto Zero = Builder.buildConstant(HalfTy, 0);
- Builder.buildMerge(DstReg, { Narrowed, Zero });
+ Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero});
} else if (MI.getOpcode() == TargetOpcode::G_SHL) {
Register Narrowed = Unmerge.getReg(0);
// dst = G_SHL s64:x, C for C >= 32
@@ -1985,7 +1985,7 @@ void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
}
auto Zero = Builder.buildConstant(HalfTy, 0);
- Builder.buildMerge(DstReg, { Zero, Narrowed });
+ Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed});
} else {
assert(MI.getOpcode() == TargetOpcode::G_ASHR);
auto Hi = Builder.buildAShr(
@@ -1995,13 +1995,13 @@ void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
if (ShiftVal == HalfSize) {
// (G_ASHR i64:x, 32) ->
// G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
- Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
+ Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi});
} else if (ShiftVal == Size - 1) {
// Don't need a second shift.
// (G_ASHR i64:x, 63) ->
// %narrowed = (G_ASHR hi_32(x), 31)
// G_MERGE_VALUES %narrowed, %narrowed
- Builder.buildMerge(DstReg, { Hi, Hi });
+ Builder.buildMergeLikeInstr(DstReg, {Hi, Hi});
} else {
auto Lo = Builder.buildAShr(
HalfTy, Unmerge.getReg(1),
@@ -2009,7 +2009,7 @@ void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
// (G_ASHR i64:x, C) ->, for C >= 32
// G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
- Builder.buildMerge(DstReg, { Lo, Hi });
+ Builder.buildMergeLikeInstr(DstReg, {Lo, Hi});
}
}
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index e21de0822c5ae..8a1fce2d3d650 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -235,7 +235,7 @@ void LegalizerHelper::extractVectorParts(Register Reg, unsigned NumElts,
// Requested sub-vectors of NarrowTy.
for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
- VRegs.push_back(MIRBuilder.buildMerge(NarrowTy, Pieces).getReg(0));
+ VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
}
// Leftover element(s).
@@ -244,7 +244,8 @@ void LegalizerHelper::extractVectorParts(Register Reg, unsigned NumElts,
} else {
LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
- VRegs.push_back(MIRBuilder.buildMerge(LeftoverTy, Pieces).getReg(0));
+ VRegs.push_back(
+ MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
}
}
@@ -257,7 +258,7 @@ void LegalizerHelper::insertParts(Register DstReg,
assert(LeftoverRegs.empty());
if (!ResultTy.isVector()) {
- MIRBuilder.buildMerge(DstReg, PartRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, PartRegs);
return;
}
@@ -306,7 +307,7 @@ void LegalizerHelper::mergeMixedSubvectors(Register DstReg,
else
appendVectorElts(AllElts, Leftover);
- MIRBuilder.buildMerge(DstReg, AllElts);
+ MIRBuilder.buildMergeLikeInstr(DstReg, AllElts);
}
/// Append the result registers of G_UNMERGE_VALUES \p MI to \p Regs.
@@ -423,7 +424,7 @@ LLT LegalizerHelper::buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
if (NumSubParts == 1)
Remerge[I] = SubMerge[0];
else
- Remerge[I] = MIRBuilder.buildMerge(NarrowTy, SubMerge).getReg(0);
+ Remerge[I] = MIRBuilder.buildMergeLikeInstr(NarrowTy, SubMerge).getReg(0);
// In the sign extend padding case, re-use the first all-signbit merge.
if (AllMergePartsArePadding && !AllPadReg)
@@ -442,11 +443,11 @@ void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
// the result.
if (DstTy == LCMTy) {
- MIRBuilder.buildMerge(DstReg, RemergeRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, RemergeRegs);
return;
}
- auto Remerge = MIRBuilder.buildMerge(LCMTy, RemergeRegs);
+ auto Remerge = MIRBuilder.buildMergeLikeInstr(LCMTy, RemergeRegs);
if (DstTy.isScalar() && LCMTy.isScalar()) {
MIRBuilder.buildTrunc(DstReg, Remerge);
return;
@@ -460,7 +461,7 @@ void LegalizerHelper::buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
UnmergeDefs[I] = MRI.createGenericVirtualRegister(DstTy);
MIRBuilder.buildUnmerge(UnmergeDefs,
- MIRBuilder.buildMerge(LCMTy, RemergeRegs));
+ MIRBuilder.buildMergeLikeInstr(LCMTy, RemergeRegs));
return;
}
@@ -941,7 +942,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (DstTy.isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
else
- MIRBuilder.buildMerge(DstReg, DstRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -1013,7 +1014,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MIRBuilder.buildFreeze(NarrowTy, Unmerge.getReg(i)).getReg(0));
}
- MIRBuilder.buildMerge(MI.getOperand(0).getReg(), Parts);
+ MIRBuilder.buildMergeLikeInstr(MI.getOperand(0).getReg(), Parts);
MI.eraseFromParent();
return Legalized;
}
@@ -1188,7 +1189,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1));
}
MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
- MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
+ MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), DstRegs);
Observer.changedInstr(MI);
MI.eraseFromParent();
return Legalized;
@@ -1365,7 +1366,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
// Gather the destination registers into the final destination.
Register DstReg = MI.getOperand(0).getReg();
- MIRBuilder.buildMerge(DstReg, DstRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -1385,7 +1386,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
DstRegs.push_back(DstPart.getReg(0));
}
- MIRBuilder.buildMerge(MI.getOperand(0), DstRegs);
+ MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), DstRegs);
Observer.changedInstr(MI);
MI.eraseFromParent();
@@ -1602,16 +1603,17 @@ LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
// Build merges of each piece.
ArrayRef<Register> Slicer(Unmerges);
for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) {
- auto Merge = MIRBuilder.buildMerge(WideTy, Slicer.take_front(PartsPerGCD));
+ auto Merge =
+ MIRBuilder.buildMergeLikeInstr(WideTy, Slicer.take_front(PartsPerGCD));
NewMergeRegs.push_back(Merge.getReg(0));
}
// A truncate may be necessary if the requested type doesn't evenly divide the
// original result type.
if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) {
- MIRBuilder.buildMerge(DstReg, NewMergeRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, NewMergeRegs);
} else {
- auto FinalMerge = MIRBuilder.buildMerge(WideDstTy, NewMergeRegs);
+ auto FinalMerge = MIRBuilder.buildMergeLikeInstr(WideDstTy, NewMergeRegs);
MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0));
}
@@ -1739,7 +1741,7 @@ LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx,
RemergeParts.emplace_back(Parts[Idx]);
}
- MIRBuilder.buildMerge(MI.getOperand(I).getReg(), RemergeParts);
+ MIRBuilder.buildMergeLikeInstr(MI.getOperand(I).getReg(), RemergeParts);
RemergeParts.clear();
}
}
@@ -2680,7 +2682,7 @@ LegalizerHelper::lowerBitcast(MachineInstr &MI) {
} else
getUnmergePieces(SrcRegs, MIRBuilder, Src, SrcEltTy);
- MIRBuilder.buildMerge(Dst, SrcRegs);
+ MIRBuilder.buildMergeLikeInstr(Dst, SrcRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -2688,7 +2690,7 @@ LegalizerHelper::lowerBitcast(MachineInstr &MI) {
if (DstTy.isVector()) {
SmallVector<Register, 8> SrcRegs;
getUnmergePieces(SrcRegs, MIRBuilder, Src, DstTy.getElementType());
- MIRBuilder.buildMerge(Dst, SrcRegs);
+ MIRBuilder.buildMergeLikeInstr(Dst, SrcRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -3760,7 +3762,7 @@ LegalizerHelper::fewerElementsVectorMultiEltType(
mergeMixedSubvectors(MI.getReg(i), OutputRegs[i]);
} else {
for (unsigned i = 0; i < NumDefs; ++i)
- MIRBuilder.buildMerge(MI.getReg(i), OutputRegs[i]);
+ MIRBuilder.buildMergeLikeInstr(MI.getReg(i), OutputRegs[i]);
}
MI.eraseFromParent();
@@ -3808,7 +3810,7 @@ LegalizerHelper::fewerElementsVectorPhi(GenericMachineInstr &MI,
if (NumLeftovers) {
mergeMixedSubvectors(MI.getReg(0), OutputRegs);
} else {
- MIRBuilder.buildMerge(MI.getReg(0), OutputRegs);
+ MIRBuilder.buildMergeLikeInstr(MI.getReg(0), OutputRegs);
}
MI.eraseFromParent();
@@ -3911,10 +3913,11 @@ LegalizerHelper::fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
for (unsigned i = 0, Offset = 0; i < NumNarrowTyPieces;
++i, Offset += NumNarrowTyElts) {
ArrayRef<Register> Pieces(&Elts[Offset], NumNarrowTyElts);
- NarrowTyElts.push_back(MIRBuilder.buildMerge(NarrowTy, Pieces).getReg(0));
+ NarrowTyElts.push_back(
+ MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
}
- MIRBuilder.buildMerge(DstReg, NarrowTyElts);
+ MIRBuilder.buildMergeLikeInstr(DstReg, NarrowTyElts);
MI.eraseFromParent();
return Legalized;
}
@@ -3942,10 +3945,11 @@ LegalizerHelper::fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
SmallVector<Register, 8> Sources;
for (unsigned j = 0; j < NumElts; ++j)
Sources.push_back(MI.getOperand(1 + i * NumElts + j).getReg());
- NarrowTyElts.push_back(MIRBuilder.buildMerge(NarrowTy, Sources).getReg(0));
+ NarrowTyElts.push_back(
+ MIRBuilder.buildMergeLikeInstr(NarrowTy, Sources).getReg(0));
}
- MIRBuilder.buildMerge(DstReg, NarrowTyElts);
+ MIRBuilder.buildMergeLikeInstr(DstReg, NarrowTyElts);
MI.eraseFromParent();
return Legalized;
}
@@ -4588,7 +4592,7 @@ LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
if (Amt.isZero()) {
- MIRBuilder.buildMerge(MI.getOperand(0), {InL, InH});
+ MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {InL, InH});
MI.eraseFromParent();
return Legalized;
}
@@ -4661,7 +4665,7 @@ LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
}
}
- MIRBuilder.buildMerge(MI.getOperand(0), {Lo, Hi});
+ MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {Lo, Hi});
MI.eraseFromParent();
return Legalized;
@@ -4772,7 +4776,7 @@ LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
llvm_unreachable("not a shift");
}
- MIRBuilder.buildMerge(DstReg, ResultRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, ResultRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -5239,7 +5243,7 @@ LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
// Take only high half of registers if this is high mul.
ArrayRef<Register> DstRegs(&DstTmpRegs[DstTmpParts - NumParts], NumParts);
- MIRBuilder.buildMerge(DstReg, DstRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
MI.eraseFromParent();
return Legalized;
}
@@ -5329,7 +5333,7 @@ LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
if (MRI.getType(DstReg).isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
else if (DstRegs.size() > 1)
- MIRBuilder.buildMerge(DstReg, DstRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
else
MIRBuilder.buildCopy(DstReg, DstRegs[0]);
MI.eraseFromParent();
@@ -5411,10 +5415,10 @@ LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
Register DstReg = MI.getOperand(0).getReg();
if (WideSize > RegTy.getSizeInBits()) {
Register MergeReg = MRI.createGenericVirtualRegister(LLT::scalar(WideSize));
- MIRBuilder.buildMerge(MergeReg, DstRegs);
+ MIRBuilder.buildMergeLikeInstr(MergeReg, DstRegs);
MIRBuilder.buildTrunc(DstReg, MergeReg);
} else
- MIRBuilder.buildMerge(DstReg, DstRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, DstRegs);
MI.eraseFromParent();
return Legalized;
@@ -6672,7 +6676,7 @@ LegalizerHelper::lowerExtractInsertVectorElt(MachineInstr &MI) {
if (InsertVal) {
SrcRegs[IdxVal] = MI.getOperand(2).getReg();
- MIRBuilder.buildMerge(DstReg, SrcRegs);
+ MIRBuilder.buildMergeLikeInstr(DstReg, SrcRegs);
} else {
MIRBuilder.buildCopy(DstReg, SrcRegs[IdxVal]);
}
@@ -6844,7 +6848,7 @@ LegalizerHelper::lowerExtract(MachineInstr &MI) {
if (SubVectorElts.size() == 1)
MIRBuilder.buildCopy(Dst, SubVectorElts[0]);
else
- MIRBuilder.buildMerge(Dst, SubVectorElts);
+ MIRBuilder.buildMergeLikeInstr(Dst, SubVectorElts);
MI.eraseFromParent();
return Legalized;
@@ -6917,7 +6921,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerInsert(MachineInstr &MI) {
DstElts.push_back(UnmergeSrc.getReg(Idx));
}
- MIRBuilder.buildMerge(Dst, DstElts);
+ MIRBuilder.buildMergeLikeInstr(Dst, DstElts);
MI.eraseFromParent();
return Legalized;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 726f9b47de786..a6148a6985a35 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -243,7 +243,7 @@ MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
for (unsigned i = 0; i < NumberOfPadElts; ++i)
Regs.push_back(Undef);
- return buildMerge(Res, Regs);
+ return buildMergeLikeInstr(Res, Regs);
}
MachineInstrBuilder
@@ -262,7 +262,7 @@ MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
Regs.push_back(Unmerge.getReg(i));
- return buildMerge(Res, Regs);
+ return buildMergeLikeInstr(Res, Regs);
}
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
@@ -597,8 +597,9 @@ MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}
-MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
- ArrayRef<Register> Ops) {
+MachineInstrBuilder
+MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
+ ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@@ -608,8 +609,8 @@ MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
}
MachineInstrBuilder
-MachineIRBuilder::buildMerge(const DstOp &Res,
- std::initializer_list<SrcOp> Ops) {
+MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
+ std::initializer_list<SrcOp> Ops) {
assert(Ops.size() > 1);
return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index 9bbe7441edaa6..1354364e5752f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -414,7 +414,8 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
}
auto Undef = MIRBuilder.buildUndef({OldLLT});
CurVReg =
- MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef}).getReg(0);
+ MIRBuilder.buildMergeLikeInstr({NewLLT}, {CurVReg, Undef})
+ .getReg(0);
} else {
// Just do a vector extend.
CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 5bf277163c102..186d0ed35d56f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1197,7 +1197,8 @@ bool AArch64LegalizerInfo::legalizeLoadStore(
MachineInstrBuilder NewI;
if (MI.getOpcode() == TargetOpcode::G_LOAD) {
NewI = MIRBuilder.buildInstr(AArch64::LDPXi, {s64, s64}, {});
- MIRBuilder.buildMerge(ValReg, {NewI->getOperand(0), NewI->getOperand(1)});
+ MIRBuilder.buildMergeLikeInstr(
+ ValReg, {NewI->getOperand(0), NewI->getOperand(1)});
} else {
auto Split = MIRBuilder.buildUnmerge(s64, MI.getOperand(0));
NewI = MIRBuilder.buildInstr(
@@ -1499,7 +1500,7 @@ bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(
*MRI.getTargetRegisterInfo(),
*ST->getRegBankInfo());
- MIRBuilder.buildMerge(MI.getOperand(0), {DstLo, DstHi});
+ MIRBuilder.buildMergeLikeInstr(MI.getOperand(0), {DstLo, DstHi});
MI.eraseFromParent();
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 7e9d1685b70b1..302889f8ffe49 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1989,7 +1989,7 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
// TODO: Should we allow mismatched types but matching sizes in merges to
// avoid the ptrtoint?
- auto BuildPtr = B.buildMerge(DstTy, {SrcAsInt, ApertureReg});
+ auto BuildPtr = B.buildMergeLikeInstr(DstTy, {SrcAsInt, ApertureReg});
if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
B.buildCopy(Dst, BuildPtr);
@@ -2023,7 +2023,7 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
uint32_t AddrHiVal = Info->get32BitAddressHighBits();
auto PtrLo = B.buildPtrToInt(S32, Src);
auto HighAddr = B.buildConstant(S32, AddrHiVal);
- B.buildMerge(Dst, {PtrLo, HighAddr});
+ B.buildMergeLikeInstr(Dst, {PtrLo, HighAddr});
MI.eraseFromParent();
return true;
}
@@ -2156,7 +2156,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
const auto Zero32 = B.buildConstant(S32, 0);
// Extend back to 64-bits.
- auto SignBit64 = B.buildMerge(S64, {Zero32, SignBit});
+ auto SignBit64 = B.buildMergeLikeInstr(S64, {Zero32, SignBit});
auto Shr = B.buildAShr(S64, FractMask, Exp);
auto Not = B.buildNot(S64, Shr);
@@ -2292,11 +2292,12 @@ bool AMDGPULegalizerInfo::legalizeFPTOI(MachineInstr &MI,
if (Signed && SrcLT == S32) {
// Flip the result based on the signedness, which is either all 0s or 1s.
- Sign = B.buildMerge(S64, {Sign, Sign});
+ Sign = B.buildMergeLikeInstr(S64, {Sign, Sign});
// r := xor({lo, hi}, sign) - sign;
- B.buildSub(Dst, B.buildXor(S64, B.buildMerge(S64, {Lo, Hi}), Sign), Sign);
+ B.buildSub(Dst, B.buildXor(S64, B.buildMergeLikeInstr(S64, {Lo, Hi}), Sign),
+ Sign);
} else
- B.buildMerge(Dst, {Lo, Hi});
+ B.buildMergeLikeInstr(Dst, {Lo, Hi});
MI.eraseFromParent();
return true;
@@ -2388,7 +2389,7 @@ bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
B.buildUnmerge(SrcRegs, Vec);
SrcRegs[IdxVal] = MI.getOperand(2).getReg();
- B.buildMerge(Dst, SrcRegs);
+ B.buildMergeLikeInstr(Dst, SrcRegs);
} else {
B.buildUndef(Dst);
}
@@ -2873,7 +2874,7 @@ bool AMDGPULegalizerInfo::legalizeBuildVector(
Src1 = B.buildTrunc(S16, MI.getOperand(2).getReg()).getReg(0);
}
- auto Merge = B.buildMerge(S32, {Src0, Src1});
+ auto Merge = B.buildMergeLikeInstr(S32, {Src0, Src1});
B.buildBitcast(Dst, Merge);
MI.eraseFromParent();
@@ -3007,7 +3008,7 @@ void AMDGPULegalizerInfo::buildMultiply(
Tmp = B.buildAnyExt(S64, LocalAccum[0]).getReg(0);
HaveSmallAccum = true;
} else if (LocalAccum[1]) {
- Tmp = B.buildMerge(S64, LocalAccum).getReg(0);
+ Tmp = B.buildMergeLikeInstr(S64, LocalAccum).getReg(0);
HaveSmallAccum = false;
} else {
Tmp = B.buildZExt(S64, LocalAccum[0]).getReg(0);
@@ -3166,7 +3167,7 @@ bool AMDGPULegalizerInfo::legalizeMul(LegalizerHelper &Helper,
buildMultiply(Helper, AccumRegs, Src0Parts, Src1Parts, UsePartialMad64_32,
SeparateOddAlignedProducts);
- B.buildMerge(DstReg, AccumRegs);
+ B.buildMergeLikeInstr(DstReg, AccumRegs);
MI.eraseFromParent();
return true;
@@ -3515,7 +3516,7 @@ void AMDGPULegalizerInfo::legalizeUnsignedDIV_REM64Impl(MachineIRBuilder &B,
std::tie(RcpLo, RcpHi) = emitReciprocalU64(B, Denom);
- auto Rcp = B.buildMerge(S64, {RcpLo, RcpHi});
+ auto Rcp = B.buildMergeLikeInstr(S64, {RcpLo, RcpHi});
auto Zero64 = B.buildConstant(S64, 0);
auto NegDenom = B.buildSub(S64, Zero64, Denom);
@@ -3529,7 +3530,7 @@ void AMDGPULegalizerInfo::legalizeUnsignedDIV_REM64Impl(MachineIRBuilder &B,
auto Add1_Lo = B.buildUAddo(S32, S1, RcpLo, MulHi1_Lo);
auto Add1_Hi = B.buildUAdde(S32, S1, RcpHi, MulHi1_Hi, Add1_Lo.getReg(1));
- auto Add1 = B.buildMerge(S64, {Add1_Lo, Add1_Hi});
+ auto Add1 = B.buildMergeLikeInstr(S64, {Add1_Lo, Add1_Hi});
auto MulLo2 = B.buildMul(S64, NegDenom, Add1);
auto MulHi2 = B.buildUMulH(S64, Add1, MulLo2);
@@ -3540,7 +3541,7 @@ void AMDGPULegalizerInfo::legalizeUnsignedDIV_REM64Impl(MachineIRBuilder &B,
auto Zero32 = B.buildConstant(S32, 0);
auto Add2_Lo = B.buildUAddo(S32, S1, Add1_Lo, MulHi2_Lo);
auto Add2_Hi = B.buildUAdde(S32, S1, Add1_Hi, MulHi2_Hi, Add2_Lo.getReg(1));
- auto Add2 = B.buildMerge(S64, {Add2_Lo, Add2_Hi});
+ auto Add2 = B.buildMergeLikeInstr(S64, {Add2_Lo, Add2_Hi});
auto UnmergeNumer = B.buildUnmerge(S32, Numer);
Register NumerLo = UnmergeNumer.getReg(0);
@@ -3554,7 +3555,7 @@ void AMDGPULegalizerInfo::legalizeUnsignedDIV_REM64Impl(MachineIRBuilder &B,
auto Sub1_Lo = B.buildUSubo(S32, S1, NumerLo, Mul3_Lo);
auto Sub1_Hi = B.buildUSube(S32, S1, NumerHi, Mul3_Hi, Sub1_Lo.getReg(1));
auto Sub1_Mi = B.buildSub(S32, NumerHi, Mul3_Hi);
- auto Sub1 = B.buildMerge(S64, {Sub1_Lo, Sub1_Hi});
+ auto Sub1 = B.buildMergeLikeInstr(S64, {Sub1_Lo, Sub1_Hi});
auto UnmergeDenom = B.buildUnmerge(S32, Denom);
Register DenomLo = UnmergeDenom.getReg(0);
@@ -3577,7 +3578,7 @@ void AMDGPULegalizerInfo::legalizeUnsignedDIV_REM64Impl(MachineIRBuilder &B,
auto Sub2_Lo = B.buildUSubo(S32, S1, Sub1_Lo, DenomLo);
auto Sub2_Mi = B.buildUSube(S32, S1, Sub1_Mi, DenomHi, Sub1_Lo.getReg(1));
auto Sub2_Hi = B.buildUSube(S32, S1, Sub2_Mi, Zero32, Sub2_Lo.getReg(1));
- auto Sub2 = B.buildMerge(S64, {Sub2_Lo, Sub2_Hi});
+ auto Sub2 = B.buildMergeLikeInstr(S64, {Sub2_Lo, Sub2_Hi});
auto One64 = B.buildConstant(S64, 1);
auto Add3 = B.buildAdd(S64, MulHi3, One64);
@@ -3595,7 +3596,7 @@ void AMDGPULegalizerInfo::legalizeUnsignedDIV_REM64Impl(MachineIRBuilder &B,
auto Sub3_Mi = B.buildUSube(S32, S1, Sub2_Mi, DenomHi, Sub2_Lo.getReg(1));
auto Sub3_Hi = B.buildUSube(S32, S1, Sub3_Mi, Zero32, Sub3_Lo.getReg(1));
- auto Sub3 = B.buildMerge(S64, {Sub3_Lo, Sub3_Hi});
+ auto Sub3 = B.buildMergeLikeInstr(S64, {Sub3_Lo, Sub3_Hi});
// endif C6
// endif C3
@@ -4593,7 +4594,7 @@ bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
LoadElts.push_back(StatusDst);
B.buildUnmerge(LoadElts, LoadDstReg);
LoadElts.truncate(NumValueDWords);
- B.buildMerge(Dst, LoadElts);
+ B.buildMergeLikeInstr(Dst, LoadElts);
}
} else if ((!IsD16 && MemTy.getSizeInBits() < 32) ||
(IsD16 && !Ty.isVector())) {
@@ -4613,7 +4614,7 @@ bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
SmallVector<Register, 4> Repack;
for (unsigned I = 0, N = Unmerge->getNumOperands() - 1; I != N; ++I)
Repack.push_back(B.buildTrunc(EltTy, Unmerge.getReg(I)).getReg(0));
- B.buildMerge(Dst, Repack);
+ B.buildMergeLikeInstr(Dst, Repack);
} else {
buildBufferLoad(Opc, Dst, RSrc, VIndex, VOffset, SOffset, ImmOffset, Format,
AuxiliaryData, MMO, IsTyped, HasVIndex, B);
@@ -5446,7 +5447,7 @@ bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
if (UseNSA && IsGFX11Plus) {
auto packLanes = [&Ops, &S32, &V3S32, &B](Register Src) {
auto Unmerge = B.buildUnmerge({S32, S32, S32}, Src);
- auto Merged = B.buildMerge(
+ auto Merged = B.buildMergeLikeInstr(
V3S32, {Unmerge.getReg(0), Unmerge.getReg(1), Unmerge.getReg(2)});
Ops.push_back(Merged.getReg(0));
};
@@ -5458,16 +5459,19 @@ bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
if (IsA16) {
auto UnmergeRayDir = B.buildUnmerge({S16, S16, S16}, RayDir);
auto UnmergeRayInvDir = B.buildUnmerge({S16, S16, S16}, RayInvDir);
- auto MergedDir = B.buildMerge(
+ auto MergedDir = B.buildMergeLikeInstr(
V3S32,
- {B.buildBitcast(S32, B.buildMerge(V2S16, {UnmergeRayInvDir.getReg(0),
- UnmergeRayDir.getReg(0)}))
+ {B.buildBitcast(
+ S32, B.buildMergeLikeInstr(V2S16, {UnmergeRayInvDir.getReg(0),
+ UnmergeRayDir.getReg(0)}))
.getReg(0),
- B.buildBitcast(S32, B.buildMerge(V2S16, {UnmergeRayInvDir.getReg(1),
- UnmergeRayDir.getReg(1)}))
+ B.buildBitcast(
+ S32, B.buildMergeLikeInstr(V2S16, {UnmergeRayInvDir.getReg(1),
+ UnmergeRayDir.getReg(1)}))
.getReg(0),
- B.buildBitcast(S32, B.buildMerge(V2S16, {UnmergeRayInvDir.getReg(2),
- UnmergeRayDir.getReg(2)}))
+ B.buildBitcast(
+ S32, B.buildMergeLikeInstr(V2S16, {UnmergeRayInvDir.getReg(2),
+ UnmergeRayDir.getReg(2)}))
.getReg(0)});
Ops.push_back(MergedDir.getReg(0));
} else {
@@ -5498,10 +5502,12 @@ bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
Register R1 = MRI.createGenericVirtualRegister(S32);
Register R2 = MRI.createGenericVirtualRegister(S32);
Register R3 = MRI.createGenericVirtualRegister(S32);
- B.buildMerge(R1, {UnmergeRayDir.getReg(0), UnmergeRayDir.getReg(1)});
- B.buildMerge(R2, {UnmergeRayDir.getReg(2), UnmergeRayInvDir.getReg(0)});
- B.buildMerge(R3,
- {UnmergeRayInvDir.getReg(1), UnmergeRayInvDir.getReg(2)});
+ B.buildMergeLikeInstr(R1,
+ {UnmergeRayDir.getReg(0), UnmergeRayDir.getReg(1)});
+ B.buildMergeLikeInstr(
+ R2, {UnmergeRayDir.getReg(2), UnmergeRayInvDir.getReg(0)});
+ B.buildMergeLikeInstr(
+ R3, {UnmergeRayInvDir.getReg(1), UnmergeRayInvDir.getReg(2)});
Ops.push_back(R1);
Ops.push_back(R2);
Ops.push_back(R3);
@@ -5514,7 +5520,7 @@ bool AMDGPULegalizerInfo::legalizeBVHIntrinsic(MachineInstr &MI,
if (!UseNSA) {
// Build a single vector containing all the operands so far prepared.
LLT OpTy = LLT::fixed_vector(Ops.size(), 32);
- Register MergedOps = B.buildMerge(OpTy, Ops).getReg(0);
+ Register MergedOps = B.buildMergeLikeInstr(OpTy, Ops).getReg(0);
Ops.clear();
Ops.push_back(MergedOps);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index b21cd788dcf59..5e16a405f375c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -730,7 +730,7 @@ Register AMDGPURegisterBankInfo::buildReadFirstLane(MachineIRBuilder &B,
if (Bits == 32)
return DstParts[0];
- Register Dst = B.buildMerge(Ty, DstParts).getReg(0);
+ Register Dst = B.buildMergeLikeInstr(Ty, DstParts).getReg(0);
MRI.setRegBank(Dst, AMDGPU::SGPRRegBank);
return Dst;
}
@@ -1440,7 +1440,7 @@ bool AMDGPURegisterBankInfo::applyMappingSBufferLoad(
if (Ty.isVector())
B.buildConcatVectors(Dst, LoadParts);
else
- B.buildMerge(Dst, LoadParts);
+ B.buildMergeLikeInstr(Dst, LoadParts);
}
// We removed the instruction earlier with a waterfall loop.
@@ -1500,7 +1500,7 @@ bool AMDGPURegisterBankInfo::applyMappingBFE(const OperandsMapper &OpdMapper,
: B.buildUbfx(S32, UnmergeSOffset.getReg(0), Zero, WidthReg);
auto Extend =
Signed ? B.buildAShr(S32, Extract, B.buildConstant(S32, 31)) : Zero;
- B.buildMerge(DstReg, {Extract, Extend});
+ B.buildMergeLikeInstr(DstReg, {Extract, Extend});
} else {
// Use bitfield extract on upper 32-bit source, and combine with lower
// 32-bit source.
@@ -1509,7 +1509,7 @@ bool AMDGPURegisterBankInfo::applyMappingBFE(const OperandsMapper &OpdMapper,
Signed
? B.buildSbfx(S32, UnmergeSOffset.getReg(1), Zero, UpperWidth)
: B.buildUbfx(S32, UnmergeSOffset.getReg(1), Zero, UpperWidth);
- B.buildMerge(DstReg, {UnmergeSOffset.getReg(0), Extract});
+ B.buildMergeLikeInstr(DstReg, {UnmergeSOffset.getReg(0), Extract});
}
MI.eraseFromParent();
return true;
@@ -1696,7 +1696,7 @@ bool AMDGPURegisterBankInfo::applyMappingMAD_64_32(
}
}
- B.buildMerge(Dst0, {DstLo, DstHi});
+ B.buildMergeLikeInstr(Dst0, {DstLo, DstHi});
if (DstOnValu) {
B.buildCopy(Dst1, Carry);
@@ -1783,7 +1783,8 @@ Register AMDGPURegisterBankInfo::handleD16VData(MachineIRBuilder &B,
const LLT S32 = LLT::scalar(32);
int NumElts = StoreVT.getNumElements();
- return B.buildMerge(LLT::fixed_vector(NumElts, S32), WideRegs).getReg(0);
+ return B.buildMergeLikeInstr(LLT::fixed_vector(NumElts, S32), WideRegs)
+ .getReg(0);
}
static std::pair<Register, unsigned>
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index b15ef094d9d21..1d204a44ddfba 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -337,7 +337,7 @@ struct ARMIncomingValueHandler : public CallLowering::IncomingValueHandler {
if (!IsLittle)
std::swap(NewRegs[0], NewRegs[1]);
- MIRBuilder.buildMerge(Arg.Regs[0], NewRegs);
+ MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);
return 1;
}
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index 3c1c2bcd7a1b9..044fad6d9e5c9 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -181,7 +181,7 @@ MipsIncomingValueHandler::assignCustomValue(CallLowering::ArgInfo &Arg,
Arg.OrigRegs.assign(Arg.Regs.begin(), Arg.Regs.end());
Arg.Regs = { CopyLo.getReg(0), CopyHi.getReg(0) };
- MIRBuilder.buildMerge(Arg.OrigRegs[0], {CopyLo, CopyHi});
+ MIRBuilder.buildMergeLikeInstr(Arg.OrigRegs[0], {CopyLo, CopyHi});
markPhysRegUsed(VALo.getLocReg());
markPhysRegUsed(VAHi.getLocReg());
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index 35b0fe218d8f4..7ed504325dbfa 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -411,9 +411,10 @@ bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);
if (Size == 64)
- MIRBuilder.buildMerge(Val, {Load_P2Half, Load_Rem});
+ MIRBuilder.buildMergeLikeInstr(Val, {Load_P2Half, Load_Rem});
else {
- auto Merge = MIRBuilder.buildMerge(s64, {Load_P2Half, Load_Rem});
+ auto Merge =
+ MIRBuilder.buildMergeLikeInstr(s64, {Load_P2Half, Load_Rem});
MIRBuilder.buildTrunc(Val, Merge);
}
}
@@ -440,7 +441,8 @@ bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
// Done. Trunc double to float if needed.
auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));
- auto Bitcast = MIRBuilder.buildMerge(s64, {Src, C_HiMask.getReg(0)});
+ auto Bitcast =
+ MIRBuilder.buildMergeLikeInstr(s64, {Src, C_HiMask.getReg(0)});
MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant(
s64, BitsToDouble(UINT64_C(0x4330000000000000)));
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index fd5163028104b..919f4f9e119b8 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -398,7 +398,7 @@ bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
return false;
if (!NewRegs.empty())
- MIRBuilder.buildMerge(Info.OrigRet.Regs[0], NewRegs);
+ MIRBuilder.buildMergeLikeInstr(Info.OrigRet.Regs[0], NewRegs);
}
CallSeqStart.addImm(Assigner.getStackSize())
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
index 542ed2a347280..4eb9d3bb7b62a 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
@@ -1784,7 +1784,7 @@ TEST_F(AArch64GISelMITest, LowerMergeValues) {
for (int I = 0; I != 8; ++I)
Merge0Ops.push_back(B.buildConstant(S3, I).getReg(0));
- auto Merge0 = B.buildMerge(S24, Merge0Ops);
+ auto Merge0 = B.buildMergeLikeInstr(S24, Merge0Ops);
// 21 = 3 3 3 3 3 3 3
// => 9, 2 extra implicit_def needed
@@ -1793,13 +1793,13 @@ TEST_F(AArch64GISelMITest, LowerMergeValues) {
for (int I = 0; I != 7; ++I)
Merge1Ops.push_back(B.buildConstant(S3, I).getReg(0));
- auto Merge1 = B.buildMerge(S21, Merge1Ops);
+ auto Merge1 = B.buildMergeLikeInstr(S21, Merge1Ops);
SmallVector<Register, 8> Merge2Ops;
for (int I = 0; I != 2; ++I)
Merge2Ops.push_back(B.buildConstant(S8, I).getReg(0));
- auto Merge2 = B.buildMerge(S16, Merge2Ops);
+ auto Merge2 = B.buildMergeLikeInstr(S16, Merge2Ops);
B.setInstr(*Merge0);
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
@@ -1877,7 +1877,7 @@ TEST_F(AArch64GISelMITest, WidenScalarMergeValuesPointer) {
auto Lo = B.buildTrunc(S32, Copies[0]);
auto Hi = B.buildTrunc(S32, Copies[1]);
- auto Merge = B.buildMerge(P0, {Lo, Hi});
+ auto Merge = B.buildMergeLikeInstr(P0, {Lo, Hi});
B.setInstr(*Merge);
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
@@ -2087,7 +2087,7 @@ TEST_F(AArch64GISelMITest, LibcallFPTrunc) {
auto MIBFPTrunc1 =
B.buildInstr(TargetOpcode::G_FPTRUNC, {S16}, {MIBTrunc});
- auto MIBMerge = B.buildMerge(S128, {Copies[1], Copies[2]});
+ auto MIBMerge = B.buildMergeLikeInstr(S128, {Copies[1], Copies[2]});
auto MIBFPTrunc2 =
B.buildInstr(TargetOpcode::G_FPTRUNC, {S64}, {MIBMerge});
diff --git a/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp b/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp
index da80785964859..4d445589043a5 100644
--- a/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp
@@ -197,7 +197,7 @@ TEST_F(AArch64GISelMITest, BuildXor) {
B.buildNot(S64, Copies[0]);
// Make sure this works with > 64-bit types
- auto Merge = B.buildMerge(S128, {Copies[0], Copies[1]});
+ auto Merge = B.buildMergeLikeInstr(S128, {Copies[0], Copies[1]});
B.buildNot(S128, Merge);
auto CheckStr = R"(
; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
@@ -324,7 +324,7 @@ TEST_F(AArch64GISelMITest, BuildAtomicRMW) {
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
-TEST_F(AArch64GISelMITest, BuildMerge) {
+TEST_F(AArch64GISelMITest, BuildMergeLikeInstr) {
setUp();
if (!TM)
return;
@@ -337,15 +337,13 @@ TEST_F(AArch64GISelMITest, BuildMerge) {
// Merging plain constants as one big blob of bit should produce a
// G_MERGE_VALUES.
- B.buildMerge(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});
+ B.buildMergeLikeInstr(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});
// Merging plain constants to a vector should produce a G_BUILD_VECTOR.
LLT V2x32 = LLT::fixed_vector(2, 32);
- Register RegC0C1 =
- B.buildMerge(V2x32, {RegC0, RegC1}).getReg(0);
- Register RegC2C3 =
- B.buildMerge(V2x32, {RegC2, RegC3}).getReg(0);
+ Register RegC0C1 = B.buildMergeLikeInstr(V2x32, {RegC0, RegC1}).getReg(0);
+ Register RegC2C3 = B.buildMergeLikeInstr(V2x32, {RegC2, RegC3}).getReg(0);
// Merging vector constants to a vector should produce a G_CONCAT_VECTORS.
- B.buildMerge(LLT::fixed_vector(4, 32), {RegC0C1, RegC2C3});
+ B.buildMergeLikeInstr(LLT::fixed_vector(4, 32), {RegC0C1, RegC2C3});
// Merging vector constants to a plain type is not allowed.
// Nothing else to test.