[llvm] 6443c0e - [AMDGPU] Stop using make_pair and make_tuple. NFC.

Jay Foad via llvm-commits <llvm-commits@lists.llvm.org>
Wed Dec 14 05:25:01 PST 2022


Author: Jay Foad
Date: 2022-12-14T13:22:26Z
New Revision: 6443c0ee02c7785bc917fcf508f4cc7ded38487a

URL: https://github.com/llvm/llvm-project/commit/6443c0ee02c7785bc917fcf508f4cc7ded38487a
DIFF: https://github.com/llvm/llvm-project/commit/6443c0ee02c7785bc917fcf508f4cc7ded38487a.diff

LOG: [AMDGPU] Stop using make_pair and make_tuple. NFC.

C++17 class template argument deduction (CTAD) allows us to call the pair and
tuple constructors directly instead of the helper functions make_pair and
make_tuple.
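
As an editorial illustration (not part of this commit), here is a minimal
before/after sketch of the pattern, assuming only standard C++17; the
function names are hypothetical:

  // Hypothetical example, not from the patch; requires C++17 CTAD.
  #include <tuple>
  #include <utility>

  std::pair<int, const char *> makeEntry(int Key) {
    // Before: the helper function deduces the element types.
    //   return std::make_pair(Key, "value");
    // After: the constructor deduces them itself via CTAD.
    return std::pair(Key, "value");
  }

  std::tuple<int, int, bool> makeTriple(int A, int B) {
    return std::tuple(A, B, A < B); // was: std::make_tuple(A, B, A < B)
  }

One behavioral caveat: std::make_pair and std::make_tuple unwrap
std::reference_wrapper arguments into plain references, while CTAD deduces
the reference_wrapper type itself, so the rewrite is a no-functional-change
only at call sites that do not pass std::ref/std::cref.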

Differential Revision: https://reviews.llvm.org/D139828

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
    llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
    llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
    llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
    llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
    llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
    llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
    llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
    llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
    llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
    llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
    llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
    llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
    llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
    llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
    llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
    llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
    llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
    llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
    llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
    llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
    llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
index d361e33995cf4..1c18cbd855fce 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
@@ -91,61 +91,59 @@ AMDGPUFunctionArgInfo::getPreloadedValue(
     AMDGPUFunctionArgInfo::PreloadedValue Value) const {
   switch (Value) {
   case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
-    return std::make_tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer
-                                                : nullptr,
-                           &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
+    return std::tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer : nullptr,
+                      &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
   }
   case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
-    return std::make_tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::WORKGROUP_ID_X:
-    return std::make_tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKGROUP_ID_Y:
-    return std::make_tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKGROUP_ID_Z:
-    return std::make_tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::LDS_KERNEL_ID:
-    return std::make_tuple(LDSKernelId ? &LDSKernelId : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(LDSKernelId ? &LDSKernelId : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
-    return std::make_tuple(
+    return std::tuple(
         PrivateSegmentWaveByteOffset ? &PrivateSegmentWaveByteOffset : nullptr,
         &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR:
-    return std::make_tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR:
-    return std::make_tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::DISPATCH_ID:
-    return std::make_tuple(DispatchID ? &DispatchID : nullptr,
-                           &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
+    return std::tuple(DispatchID ? &DispatchID : nullptr,
+                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
   case AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT:
-    return std::make_tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
-                           &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
+    return std::tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
+                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
   case AMDGPUFunctionArgInfo::DISPATCH_PTR:
-    return std::make_tuple(DispatchPtr ? &DispatchPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(DispatchPtr ? &DispatchPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::QUEUE_PTR:
-    return std::make_tuple(QueuePtr ? &QueuePtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(QueuePtr ? &QueuePtr : nullptr, &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::WORKITEM_ID_X:
-    return std::make_tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
-                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
+                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKITEM_ID_Y:
-    return std::make_tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
-                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
+                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKITEM_ID_Z:
-    return std::make_tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
-                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
+                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
   }
   llvm_unreachable("unexpected preloaded value type");
 }

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 41ed2467170f0..08b29641d14a5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -872,7 +872,7 @@ static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
   Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
   Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
   Hi = Builder.CreateTrunc(Hi, I32Ty);
-  return std::make_pair(Lo, Hi);
+  return std::pair(Lo, Hi);
 }
 
 static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
index 96ce376faa93a..2ffc8b2a3a7b8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
@@ -28,24 +28,24 @@ AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
     else
       Offset = Op.getCImm()->getZExtValue();
 
-    return std::make_pair(Register(), Offset);
+    return std::pair(Register(), Offset);
   }
 
   int64_t Offset;
   if (Def->getOpcode() == TargetOpcode::G_ADD) {
     // TODO: Handle G_OR used for add case
     if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))
-      return std::make_pair(Def->getOperand(1).getReg(), Offset);
+      return std::pair(Def->getOperand(1).getReg(), Offset);
 
     // FIXME: matcher should ignore copies
     if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset))))
-      return std::make_pair(Def->getOperand(1).getReg(), Offset);
+      return std::pair(Def->getOperand(1).getReg(), Offset);
   }
 
   Register Base;
   if (KnownBits && mi_match(Reg, MRI, m_GOr(m_Reg(Base), m_ICst(Offset))) &&
       KnownBits->maskedValueIsZero(Base, APInt(32, Offset)))
-    return std::make_pair(Base, Offset);
+    return std::pair(Base, Offset);
 
   // Handle G_PTRTOINT (G_PTR_ADD base, const) case
   if (Def->getOpcode() == TargetOpcode::G_PTRTOINT) {
@@ -54,14 +54,14 @@ AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
                  m_GPtrAdd(m_MInstr(Base), m_ICst(Offset)))) {
       // If Base was int converted to pointer, simply return int and offset.
       if (Base->getOpcode() == TargetOpcode::G_INTTOPTR)
-        return std::make_pair(Base->getOperand(1).getReg(), Offset);
+        return std::pair(Base->getOperand(1).getReg(), Offset);
 
       // Register returned here will be of pointer type.
-      return std::make_pair(Base->getOperand(0).getReg(), Offset);
+      return std::pair(Base->getOperand(0).getReg(), Offset);
     }
   }
 
-  return std::make_pair(Reg, 0);
+  return std::pair(Reg, 0);
 }
 
 bool AMDGPU::hasAtomicFaddRtnForTy(const GCNSubtarget &Subtarget,

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index 2c578cc0042df..bd5f82e252535 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -33,7 +33,7 @@ static std::pair<Type *, Align> getArgumentTypeAlign(const Argument &Arg,
   if (!ArgAlign)
     ArgAlign = DL.getABITypeAlign(Ty);
 
-  return std::make_pair(Ty, *ArgAlign);
+  return std::pair(Ty, *ArgAlign);
 }
 
 namespace llvm {

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index d29936c1d4dd1..806e96441b055 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -347,7 +347,7 @@ void PipelineSolver::convertSyncMapsToArrays() {
     for (auto &SUsToCandSGs : SyncInstrMap.second) {
       if (PipelineInstrs[PipelineIDx].size() == 0) {
         PipelineInstrs[PipelineIDx].push_back(
-            std::make_pair(SUsToCandSGs.first, SUsToCandSGs.second));
+            std::pair(SUsToCandSGs.first, SUsToCandSGs.second));
         continue;
       }
       auto SortPosition = PipelineInstrs[PipelineIDx].begin();
@@ -357,8 +357,7 @@ void PipelineSolver::convertSyncMapsToArrays() {
              SUsToCandSGs.first->NodeNum > SortPosition->first->NodeNum)
         ++SortPosition;
       PipelineInstrs[PipelineIDx].insert(
-          SortPosition,
-          std::make_pair(SUsToCandSGs.first, SUsToCandSGs.second));
+          SortPosition, std::pair(SUsToCandSGs.first, SUsToCandSGs.second));
     }
     --PipelineIDx;
   }
@@ -508,15 +507,15 @@ void PipelineSolver::populateReadyList(
 
     if (UseCostHeur) {
       if (Match->isFull()) {
-        ReadyList.push_back(std::make_pair(*I, MissPenalty));
+        ReadyList.push_back(std::pair(*I, MissPenalty));
         continue;
       }
 
       int TempCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, AddedEdges);
-      ReadyList.push_back(std::make_pair(*I, TempCost));
+      ReadyList.push_back(std::pair(*I, TempCost));
       removeEdges(AddedEdges);
     } else
-      ReadyList.push_back(std::make_pair(*I, -1));
+      ReadyList.push_back(std::pair(*I, -1));
   }
 
   if (UseCostHeur) {
@@ -913,7 +912,7 @@ int SchedGroup::link(SUnit &SU, bool MakePred,
     // the A->B edge impossible, otherwise it returns true;
     bool Added = tryAddEdge(A, B);
     if (Added)
-      AddedEdges.push_back(std::make_pair(A, B));
+      AddedEdges.push_back(std::pair(A, B));
     else
       ++MissedEdges;
   }

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index d8a6bd80f9283..2361b585ad55b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1358,7 +1358,7 @@ std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const
   // use constant 0 for soffset. This value must be retained until
   // frame elimination and eliminateFrameIndex will choose the appropriate
   // frame register if need be.
-  return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
+  return std::pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
 }
 
 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 8338961bdba83..a95f4123aa777 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1473,7 +1473,7 @@ AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
 
-  return std::make_pair(Lo, Hi);
+  return std::pair(Lo, Hi);
 }
 
 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
@@ -1505,7 +1505,7 @@ AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
   HiVT = NumElts - LoNumElts == 1
              ? EltVT
              : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
-  return std::make_pair(LoVT, HiVT);
+  return std::pair(LoVT, HiVT);
 }
 
 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
@@ -1523,7 +1523,7 @@ AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
   SDValue Hi = DAG.getNode(
       HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
       HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
-  return std::make_pair(Lo, Hi);
+  return std::pair(Lo, Hi);
 }
 
 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 32f4b79c1933b..cdd7af8d5ba64 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2914,8 +2914,8 @@ computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI,
   // Skip out of bounds offsets, or else we would end up using an undefined
   // register.
   if (static_cast<unsigned>(Offset) >= SubRegs.size())
-    return std::make_pair(IdxReg, SubRegs[0]);
-  return std::make_pair(IdxBaseReg, SubRegs[Offset]);
+    return std::pair(IdxReg, SubRegs[0]);
+  return std::pair(IdxBaseReg, SubRegs[Offset]);
 }
 
 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
@@ -3526,7 +3526,7 @@ std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
   if (OpSel)
     Mods |= SISrcMods::OP_SEL_0;
 
-  return std::make_pair(Src, Mods);
+  return std::pair(Src, Mods);
 }
 
 Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded(
@@ -3659,7 +3659,7 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(
   // Packed instructions do not have abs modifiers.
   Mods |= SISrcMods::OP_SEL_1;
 
-  return std::make_pair(Src, Mods);
+  return std::pair(Src, Mods);
 }
 
 InstructionSelector::ComplexRendererFns
@@ -3894,7 +3894,7 @@ AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
                                                 uint64_t FlatVariant) const {
   MachineInstr *MI = Root.getParent();
 
-  auto Default = std::make_pair(Root.getReg(), 0);
+  auto Default = std::pair(Root.getReg(), 0);
 
   if (!STI.hasFlatInstOffsets())
     return Default;
@@ -3910,7 +3910,7 @@ AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
     return Default;
 
-  return std::make_pair(PtrBase, ConstOffset);
+  return std::pair(PtrBase, ConstOffset);
 }
 
 InstructionSelector::ComplexRendererFns
@@ -4374,7 +4374,7 @@ std::pair<Register, unsigned>
 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
   if (!RootDef)
-    return std::make_pair(Root.getReg(), 0);
+    return std::pair(Root.getReg(), 0);
 
   int64_t ConstAddr = 0;
 
@@ -4386,7 +4386,7 @@ AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const
   if (Offset) {
     if (isDSOffsetLegal(PtrBase, Offset)) {
       // (add n0, c0)
-      return std::make_pair(PtrBase, Offset);
+      return std::pair(PtrBase, Offset);
     }
   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
     // TODO
@@ -4397,7 +4397,7 @@ AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const
 
   }
 
-  return std::make_pair(Root.getReg(), 0);
+  return std::pair(Root.getReg(), 0);
 }
 
 InstructionSelector::ComplexRendererFns
@@ -4439,7 +4439,7 @@ AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
                                                   unsigned Size) const {
   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
   if (!RootDef)
-    return std::make_pair(Root.getReg(), 0);
+    return std::pair(Root.getReg(), 0);
 
   int64_t ConstAddr = 0;
 
@@ -4453,7 +4453,7 @@ AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
     int64_t OffsetValue1 = Offset + Size;
     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
       // (add n0, c0)
-      return std::make_pair(PtrBase, OffsetValue0 / Size);
+      return std::pair(PtrBase, OffsetValue0 / Size);
     }
   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
     // TODO
@@ -4463,7 +4463,7 @@ AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
 
   }
 
-  return std::make_pair(Root.getReg(), 0);
+  return std::pair(Root.getReg(), 0);
 }
 
 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index bac333612a7d1..15f1368980d85 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -96,8 +96,8 @@ static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
     const LLT EltTy = Ty.getElementType();
-    return std::make_pair(TypeIdx,
-                          LLT::fixed_vector(Ty.getNumElements() + 1, EltTy));
+    return std::pair(TypeIdx,
+                     LLT::fixed_vector(Ty.getNumElements() + 1, EltTy));
   };
 }
 
@@ -108,9 +108,8 @@ static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
     unsigned Size = Ty.getSizeInBits();
     unsigned Pieces = (Size + 63) / 64;
     unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
-    return std::make_pair(
-        TypeIdx,
-        LLT::scalarOrVector(ElementCount::getFixed(NewNumElts), EltTy));
+    return std::pair(TypeIdx, LLT::scalarOrVector(
+                                  ElementCount::getFixed(NewNumElts), EltTy));
   };
 }
 
@@ -128,7 +127,7 @@ static LegalizeMutation moreEltsToNext32Bit(unsigned TypeIdx) {
     assert(EltSize < 32);
 
     const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;
-    return std::make_pair(TypeIdx, LLT::fixed_vector(NewNumElts, EltTy));
+    return std::pair(TypeIdx, LLT::fixed_vector(NewNumElts, EltTy));
   };
 }
 
@@ -147,7 +146,7 @@ static LLT getBitcastRegisterType(const LLT Ty) {
 static LegalizeMutation bitcastToRegisterType(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
-    return std::make_pair(TypeIdx, getBitcastRegisterType(Ty));
+    return std::pair(TypeIdx, getBitcastRegisterType(Ty));
   };
 }
 
@@ -156,7 +155,7 @@ static LegalizeMutation bitcastToVectorElement32(unsigned TypeIdx) {
     const LLT Ty = Query.Types[TypeIdx];
     unsigned Size = Ty.getSizeInBits();
     assert(Size % 32 == 0);
-    return std::make_pair(
+    return std::pair(
         TypeIdx, LLT::scalarOrVector(ElementCount::getFixed(Size / 32), 32));
   };
 }
@@ -1069,36 +1068,35 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   getActionDefinitionsBuilder(G_INTTOPTR)
-    // List the common cases
-    .legalForCartesianProduct(AddrSpaces64, {S64})
-    .legalForCartesianProduct(AddrSpaces32, {S32})
-    .scalarize(0)
-    // Accept any address space as long as the size matches
-    .legalIf(sameSize(0, 1))
-    .widenScalarIf(smallerThan(1, 0),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
-      })
-    .narrowScalarIf(largerThan(1, 0),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
+      // List the common cases
+      .legalForCartesianProduct(AddrSpaces64, {S64})
+      .legalForCartesianProduct(AddrSpaces32, {S32})
+      .scalarize(0)
+      // Accept any address space as long as the size matches
+      .legalIf(sameSize(0, 1))
+      .widenScalarIf(smallerThan(1, 0),
+                     [](const LegalityQuery &Query) {
+                       return std::pair(
+                           1, LLT::scalar(Query.Types[0].getSizeInBits()));
+                     })
+      .narrowScalarIf(largerThan(1, 0), [](const LegalityQuery &Query) {
+        return std::pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
       });
 
   getActionDefinitionsBuilder(G_PTRTOINT)
-    // List the common cases
-    .legalForCartesianProduct(AddrSpaces64, {S64})
-    .legalForCartesianProduct(AddrSpaces32, {S32})
-    .scalarize(0)
-    // Accept any address space as long as the size matches
-    .legalIf(sameSize(0, 1))
-    .widenScalarIf(smallerThan(0, 1),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
-      })
-    .narrowScalarIf(
-      largerThan(0, 1),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
+      // List the common cases
+      .legalForCartesianProduct(AddrSpaces64, {S64})
+      .legalForCartesianProduct(AddrSpaces32, {S32})
+      .scalarize(0)
+      // Accept any address space as long as the size matches
+      .legalIf(sameSize(0, 1))
+      .widenScalarIf(smallerThan(0, 1),
+                     [](const LegalityQuery &Query) {
+                       return std::pair(
+                           0, LLT::scalar(Query.Types[1].getSizeInBits()));
+                     })
+      .narrowScalarIf(largerThan(0, 1), [](const LegalityQuery &Query) {
+        return std::pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
       });
 
   getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
@@ -1223,16 +1221,16 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
 
               // Split extloads.
               if (DstSize > MemSize)
-                return std::make_pair(0, LLT::scalar(MemSize));
+                return std::pair(0, LLT::scalar(MemSize));
 
               unsigned MaxSize = maxSizeForAddrSpace(ST,
                                                      PtrTy.getAddressSpace(),
                                                      Op == G_LOAD);
               if (MemSize > MaxSize)
-                return std::make_pair(0, LLT::scalar(MaxSize));
+                return std::pair(0, LLT::scalar(MaxSize));
 
               uint64_t Align = Query.MMODescrs[0].AlignInBits;
-              return std::make_pair(0, LLT::scalar(Align));
+              return std::pair(0, LLT::scalar(Align));
             })
         .fewerElementsIf(
             [=](const LegalityQuery &Query) -> bool {
@@ -1259,7 +1257,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                 unsigned EltSize = EltTy.getSizeInBits();
 
                 if (MaxSize % EltSize == 0) {
-                  return std::make_pair(
+                  return std::pair(
                       0, LLT::scalarOrVector(
                              ElementCount::getFixed(MaxSize / EltSize), EltTy));
                 }
@@ -1270,15 +1268,15 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                 // The scalars will need to be re-legalized.
                 if (NumPieces == 1 || NumPieces >= NumElts ||
                     NumElts % NumPieces != 0)
-                  return std::make_pair(0, EltTy);
+                  return std::pair(0, EltTy);
 
-                return std::make_pair(
-                    0, LLT::fixed_vector(NumElts / NumPieces, EltTy));
+                return std::pair(0,
+                                 LLT::fixed_vector(NumElts / NumPieces, EltTy));
               }
 
               // FIXME: We could probably handle weird extending loads better.
               if (DstTy.getSizeInBits() > MemSize)
-                return std::make_pair(0, EltTy);
+                return std::pair(0, EltTy);
 
               unsigned EltSize = EltTy.getSizeInBits();
               unsigned DstSize = DstTy.getSizeInBits();
@@ -1287,13 +1285,13 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                 // to the widest type. TODO: Account for alignment. As-is it
                 // should be OK, since the new parts will be further legalized.
                 unsigned FloorSize = PowerOf2Floor(DstSize);
-                return std::make_pair(
+                return std::pair(
                     0, LLT::scalarOrVector(
                            ElementCount::getFixed(FloorSize / EltSize), EltTy));
               }
 
               // May need relegalization for the scalars.
-              return std::make_pair(0, EltTy);
+              return std::pair(0, EltTy);
             })
     .minScalar(0, S32)
     .narrowScalarIf(isWideScalarExtLoadTruncStore(0), changeTo(0, S32))
@@ -1472,7 +1470,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
           const unsigned VecSize = VecTy.getSizeInBits();
 
           const unsigned TargetEltSize = DstEltSize % 64 == 0 ? 64 : 32;
-          return std::make_pair(
+          return std::pair(
               VecTypeIdx,
               LLT::fixed_vector(VecSize / TargetEltSize, TargetEltSize));
         })
@@ -1638,7 +1636,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
           if (RoundedTo < NewSizeInBits)
             NewSizeInBits = RoundedTo;
         }
-        return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
+        return std::pair(BigTyIdx, LLT::scalar(NewSizeInBits));
       })
       // Any vectors left are the wrong size. Scalarize them.
       .scalarize(0)
@@ -4274,7 +4272,7 @@ AMDGPULegalizerInfo::splitBufferOffsets(MachineIRBuilder &B,
   if (!BaseReg)
     BaseReg = B.buildConstant(S32, 0).getReg(0);
 
-  return std::make_pair(BaseReg, ImmOffset);
+  return std::pair(BaseReg, ImmOffset);
 }
 
 /// Update \p MMO based on the offset inputs to a raw/struct buffer intrinsic.

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 2a35937590b2f..c96fab08a2676 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -1776,7 +1776,7 @@ static void removeExternalCFGEdges(MachineBasicBlock *StartMBB,
 
   for (MachineBasicBlock *Pred : StartMBB->predecessors())
     if (Pred != EndMBB)
-      Succs.insert(std::make_pair(Pred, StartMBB));
+      Succs.insert(std::pair(Pred, StartMBB));
 
   for (auto SI : Succs) {
     std::pair<MachineBasicBlock *, MachineBasicBlock *> Edge = SI;
@@ -2072,8 +2072,7 @@ void AMDGPUMachineCFGStructurizer::prunePHIInfo(MachineBasicBlock *MBB) {
         MachineBasicBlock *SourceMBB = Source.second;
         MachineOperand *Def = &(*(MRI->def_begin(SourceReg)));
         if (Def->getParent()->getParent() != MBB) {
-          ElimiatedSources.push_back(
-              std::make_tuple(DestReg, SourceReg, SourceMBB));
+          ElimiatedSources.push_back(std::tuple(DestReg, SourceReg, SourceMBB));
         }
       }
     }

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
index 3414225211115..9f2e2bfdaf789 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -51,7 +51,7 @@ AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF)
 unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                   const GlobalVariable &GV,
                                                   Align Trailing) {
-  auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
+  auto Entry = LocalMemoryObjects.insert(std::pair(&GV, 0));
   if (!Entry.second)
     return Entry.first->second;
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index f514f7919188d..ef895c0a7961a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -221,7 +221,7 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
     ST.makeLIDRangeMetadata(LocalSizeY);
     ST.makeLIDRangeMetadata(LocalSizeZ);
 
-    return std::make_pair(LocalSizeY, LocalSizeZ);
+    return std::pair(LocalSizeY, LocalSizeZ);
   }
 
   // We must read the size out of the dispatch pointer.
@@ -290,7 +290,7 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
   // Extract y component. Upper half of LoadZU should be zero already.
   Value *Y = Builder.CreateLShr(LoadXY, 16);
 
-  return std::make_pair(Y, LoadZU);
+  return std::pair(Y, LoadZU);
 }
 
 Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
index 6e823fb8b1d8b..5a4ab467731ec 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
@@ -285,7 +285,7 @@ bool AMDGPUPropagateAttributes::process() {
           NewRoots.insert(NewF);
         }
 
-        ToReplace.push_back(std::make_pair(CI, NewF));
+        ToReplace.push_back(std::pair(CI, NewF));
         Replaced.insert(&F);
 
         Changed = true;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 4d62244e54389..1e754402714f5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -914,7 +914,7 @@ bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
       Op.setReg(CurrentLaneReg);
 
       // Make sure we don't re-process this register again.
-      WaterfalledRegMap.insert(std::make_pair(OldReg, Op.getReg()));
+      WaterfalledRegMap.insert(std::pair(OldReg, Op.getReg()));
     }
   }
 
@@ -1733,17 +1733,17 @@ unpackV2S16ToS32(MachineIRBuilder &B, Register Src, unsigned ExtOpcode) {
   if (ExtOpcode == TargetOpcode::G_SEXT) {
     auto ExtLo = B.buildSExtInReg(S32, Bitcast, 16);
     auto ShiftHi = B.buildAShr(S32, Bitcast, B.buildConstant(S32, 16));
-    return std::make_pair(ExtLo.getReg(0), ShiftHi.getReg(0));
+    return std::pair(ExtLo.getReg(0), ShiftHi.getReg(0));
   }
 
   auto ShiftHi = B.buildLShr(S32, Bitcast, B.buildConstant(S32, 16));
   if (ExtOpcode == TargetOpcode::G_ZEXT) {
     auto ExtLo = B.buildAnd(S32, Bitcast, B.buildConstant(S32, 0xffff));
-    return std::make_pair(ExtLo.getReg(0), ShiftHi.getReg(0));
+    return std::pair(ExtLo.getReg(0), ShiftHi.getReg(0));
   }
 
   assert(ExtOpcode == TargetOpcode::G_ANYEXT);
-  return std::make_pair(Bitcast.getReg(0), ShiftHi.getReg(0));
+  return std::pair(Bitcast.getReg(0), ShiftHi.getReg(0));
 }
 
 // For cases where only a single copy is inserted for matching register banks.
@@ -1789,14 +1789,14 @@ static std::pair<Register, unsigned>
 getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg) {
   int64_t Const;
   if (mi_match(Reg, MRI, m_ICst(Const)))
-    return std::make_pair(Register(), Const);
+    return std::pair(Register(), Const);
 
   Register Base;
   if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Const))))
-    return std::make_pair(Base, Const);
+    return std::pair(Base, Const);
 
   // TODO: Handle G_OR used for add case
-  return std::make_pair(Reg, 0);
+  return std::pair(Reg, 0);
 }
 
 std::pair<Register, unsigned>

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
index c356171f5cc42..299ac106ebee4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
@@ -171,7 +171,7 @@ class ReplaceLDSUseImpl {
   // Insert new global LDS pointer which points to LDS.
   GlobalVariable *createLDSPointer(GlobalVariable *GV) {
     // LDS pointer which points to LDS is already created? Return it.
-    auto PointerEntry = LDSToPointer.insert(std::make_pair(GV, nullptr));
+    auto PointerEntry = LDSToPointer.insert(std::pair(GV, nullptr));
     if (!PointerEntry.second)
       return PointerEntry.first->second;
 
@@ -199,7 +199,7 @@ class ReplaceLDSUseImpl {
   BasicBlock *activateLaneZero(Function *K) {
     // If the entry basic block of kernel K is already split, then return
     // newly created basic block.
-    auto BasicBlockEntry = KernelToInitBB.insert(std::make_pair(K, nullptr));
+    auto BasicBlockEntry = KernelToInitBB.insert(std::pair(K, nullptr));
     if (!BasicBlockEntry.second)
       return BasicBlockEntry.first->second;
 
@@ -227,7 +227,7 @@ class ReplaceLDSUseImpl {
                             GlobalVariable *LDSPointer) {
     // If LDS pointer is already initialized within K, then nothing to do.
     auto PointerEntry = KernelToLDSPointers.insert(
-        std::make_pair(K, SmallPtrSet<GlobalVariable *, 8>()));
+        std::pair(K, SmallPtrSet<GlobalVariable *, 8>()));
     if (!PointerEntry.second)
       if (PointerEntry.first->second.contains(LDSPointer))
         return;
@@ -297,10 +297,10 @@ class ReplaceLDSUseImpl {
     // If the instruction which replaces LDS within F is already created, then
     // return it.
     auto LDSEntry = FunctionToLDSToReplaceInst.insert(
-        std::make_pair(F, DenseMap<GlobalVariable *, Value *>()));
+        std::pair(F, DenseMap<GlobalVariable *, Value *>()));
     if (!LDSEntry.second) {
       auto ReplaceInstEntry =
-          LDSEntry.first->second.insert(std::make_pair(GV, nullptr));
+          LDSEntry.first->second.insert(std::pair(GV, nullptr));
       if (!ReplaceInstEntry.second)
         return ReplaceInstEntry.first->second;
     }
@@ -559,7 +559,7 @@ getFunctionToInstsMap(User *U, bool CollectKernelInsts) {
       }
     }
 
-    FunctionToInsts.insert(std::make_pair(F, SmallPtrSet<Instruction *, 8>()));
+    FunctionToInsts.insert(std::pair(F, SmallPtrSet<Instruction *, 8>()));
     FunctionToInsts[F].insert(I);
   }
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
index 01cb14714ad98..31e134d42e237 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
@@ -126,8 +126,8 @@ bool AMDGPUResourceUsageAnalysis::runOnModule(Module &M) {
     MachineFunction *MF = MMI.getMachineFunction(*F);
     assert(MF && "function must have been generated already");
 
-    auto CI = CallGraphResourceInfo.insert(
-        std::make_pair(F, SIFunctionResourceInfo()));
+    auto CI =
+        CallGraphResourceInfo.insert(std::pair(F, SIFunctionResourceInfo()));
     SIFunctionResourceInfo &Info = CI.first->second;
     assert(CI.second && "should only be called once per function");
     Info = analyzeResourceUsage(*MF, TM);
@@ -142,8 +142,8 @@ bool AMDGPUResourceUsageAnalysis::runOnModule(Module &M) {
     if (!F || F->isDeclaration())
       continue;
 
-    auto CI = CallGraphResourceInfo.insert(
-      std::make_pair(F, SIFunctionResourceInfo()));
+    auto CI =
+        CallGraphResourceInfo.insert(std::pair(F, SIFunctionResourceInfo()));
     if (!CI.second) // Skip already visited functions
       continue;
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
index 15736005409e9..750253b09c611 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -28,7 +28,7 @@
 /// into something like this:
 ///
 ///  std::pair<int, int> foo(int a, int b) {
-///     return std::make_pair(a + b, bar());
+///     return std::pair(a + b, bar());
 /// }
 ///
 /// Typically the incoming pointer is a simple alloca for a temporary variable

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 8b5d0b88b0a55..7e827d51586a6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -367,9 +367,9 @@ AMDGPUSubtarget::getDefaultFlatWorkGroupSize(CallingConv::ID CC) const {
   case CallingConv::AMDGPU_ES:
   case CallingConv::AMDGPU_GS:
   case CallingConv::AMDGPU_PS:
-    return std::make_pair(1, getWavefrontSize());
+    return std::pair(1, getWavefrontSize());
   default:
-    return std::make_pair(1u, getMaxFlatWorkGroupSize());
+    return std::pair(1u, getMaxFlatWorkGroupSize());
   }
 }
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 56b3d2d209e1c..0c7acd5a589fd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -761,13 +761,13 @@ AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
   if (auto *II = dyn_cast<IntrinsicInst>(V)) {
     switch (II->getIntrinsicID()) {
     case Intrinsic::amdgcn_is_shared:
-      return std::make_pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
+      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
     case Intrinsic::amdgcn_is_private:
-      return std::make_pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
+      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
     default:
       break;
     }
-    return std::make_pair(nullptr, -1);
+    return std::pair(nullptr, -1);
   }
   // Check the global pointer predication based on
   // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
@@ -778,9 +778,9 @@ AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
           m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                   m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                       m_Deferred(Ptr))))))
-    return std::make_pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
+    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
 
-  return std::make_pair(nullptr, -1);
+  return std::pair(nullptr, -1);
 }
 
 unsigned

diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index e6d9d5ddfbf47..e6671b233ce0b 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -5492,10 +5492,10 @@ bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
   const char *AssemblerDirectiveEnd;
   std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
       isHsaAbiVersion3AndAbove(&getSTI())
-          ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
-                            HSAMD::V3::AssemblerDirectiveEnd)
-          : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
-                            HSAMD::AssemblerDirectiveEnd);
+          ? std::tuple(HSAMD::V3::AssemblerDirectiveBegin,
+                       HSAMD::V3::AssemblerDirectiveEnd)
+          : std::tuple(HSAMD::AssemblerDirectiveBegin,
+                       HSAMD::AssemblerDirectiveEnd);
 
   if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
     return Error(getLoc(),

diff --git a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
index 6f82148854c41..366bc0a8ec0d5 100644
--- a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
@@ -259,10 +259,10 @@ bool GCNNSAReassign::runOnMachineFunction(MachineFunction &MF) {
       default:
         continue;
       case NSA_Status::CONTIGUOUS:
-        Candidates.push_back(std::make_pair(&MI, true));
+        Candidates.push_back(std::pair(&MI, true));
         break;
       case NSA_Status::NON_CONTIGUOUS:
-        Candidates.push_back(std::make_pair(&MI, false));
+        Candidates.push_back(std::pair(&MI, false));
         ++NumNSAInstructions;
         break;
       }

diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 9b76122b77d8b..cd295e04fac3f 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -468,7 +468,7 @@ GCNScheduleDAGMILive::createSchedStage(GCNSchedStageID SchedStageID) {
 void GCNScheduleDAGMILive::schedule() {
   // Collect all scheduling regions. The actual scheduling is performed in
   // GCNScheduleDAGMILive::finalizeSchedule.
-  Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
+  Regions.push_back(std::pair(RegionBegin, RegionEnd));
 }
 
 GCNRegPressure
@@ -841,7 +841,7 @@ void GCNSchedStage::setupNewBlock() {
 }
 
 void GCNSchedStage::finalizeGCNRegion() {
-  DAG.Regions[RegionIdx] = std::make_pair(DAG.RegionBegin, DAG.RegionEnd);
+  DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
   DAG.RescheduleRegions[RegionIdx] = false;
   if (S.HasHighPressure)
     DAG.RegionsWithHighRP[RegionIdx] = true;
@@ -1065,7 +1065,7 @@ void GCNSchedStage::revertScheduling() {
   // RegionBegin and RegionEnd if needed.
   DAG.placeDebugValues();
 
-  DAG.Regions[RegionIdx] = std::make_pair(DAG.RegionBegin, DAG.RegionEnd);
+  DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
 }
 
 void PreRARematStage::collectRematerializableInstructions() {
@@ -1326,22 +1326,21 @@ void GCNScheduleDAGMILive::updateRegionBoundaries(
       // MI is in a region with size 1, after removing, the region will be
       // size 0, set RegionBegin and RegionEnd to pass end of block iterator.
       RegionBoundaries[I] =
-          std::make_pair(MI->getParent()->end(), MI->getParent()->end());
+          std::pair(MI->getParent()->end(), MI->getParent()->end());
       return;
     }
     if (MI == RegionBoundaries[I].first) {
       if (Removing)
         RegionBoundaries[I] =
-            std::make_pair(std::next(MI), RegionBoundaries[I].second);
+            std::pair(std::next(MI), RegionBoundaries[I].second);
       else
         // Inserted NewMI in front of region, set new RegionBegin to NewMI
-        RegionBoundaries[I] = std::make_pair(MachineBasicBlock::iterator(NewMI),
-                                             RegionBoundaries[I].second);
+        RegionBoundaries[I] = std::pair(MachineBasicBlock::iterator(NewMI),
+                                        RegionBoundaries[I].second);
       return;
     }
     if (Removing && MI == RegionBoundaries[I].second) {
-      RegionBoundaries[I] =
-          std::make_pair(RegionBoundaries[I].first, std::prev(MI));
+      RegionBoundaries[I] = std::pair(RegionBoundaries[I].first, std::prev(MI));
       return;
     }
   }

diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index d04ec6490aae5..4056274cd4404 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -275,7 +275,7 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const {
       if (Reg == R600::ALU_CONST) {
         MachineOperand &Sel =
             MI.getOperand(getOperandIdx(MI.getOpcode(), Op[1]));
-        Result.push_back(std::make_pair(&MO, Sel.getImm()));
+        Result.push_back(std::pair(&MO, Sel.getImm()));
         continue;
       }
     }
@@ -296,19 +296,19 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const {
     Register Reg = MO.getReg();
     if (Reg == R600::ALU_CONST) {
       MachineOperand &Sel = MI.getOperand(getOperandIdx(MI.getOpcode(), Op[1]));
-      Result.push_back(std::make_pair(&MO, Sel.getImm()));
+      Result.push_back(std::pair(&MO, Sel.getImm()));
       continue;
     }
     if (Reg == R600::ALU_LITERAL_X) {
       MachineOperand &Operand =
           MI.getOperand(getOperandIdx(MI.getOpcode(), R600::OpName::literal));
       if (Operand.isImm()) {
-        Result.push_back(std::make_pair(&MO, Operand.getImm()));
+        Result.push_back(std::pair(&MO, Operand.getImm()));
         continue;
       }
       assert(Operand.isGlobal());
     }
-    Result.push_back(std::make_pair(&MO, 0));
+    Result.push_back(std::pair(&MO, 0));
   }
   return Result;
 }
@@ -326,11 +326,11 @@ R600InstrInfo::ExtractSrcs(MachineInstr &MI,
     Register Reg = Src.first->getReg();
     int Index = RI.getEncodingValue(Reg) & 0xff;
     if (Reg == R600::OQAP) {
-      Result.push_back(std::make_pair(Index, 0U));
+      Result.push_back(std::pair(Index, 0U));
     }
     if (PV.find(Reg) != PV.end()) {
       // 255 is used to tells its a PS/PV reg
-      Result.push_back(std::make_pair(255, 0U));
+      Result.push_back(std::pair(255, 0U));
       continue;
     }
     if (Index > 127) {
@@ -339,7 +339,7 @@ R600InstrInfo::ExtractSrcs(MachineInstr &MI,
       continue;
     }
     unsigned Chan = RI.getHWRegChan(Reg);
-    Result.push_back(std::make_pair(Index, Chan));
+    Result.push_back(std::pair(Index, Chan));
   }
   for (; i < 3; ++i)
     Result.push_back(DummyPair);

diff --git a/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp b/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
index aa156190b7ae1..2a15c0123b740 100644
--- a/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
@@ -283,7 +283,7 @@ class R600OpenCLImageTypeLoweringPass : public ModulePass {
       Modified = true;
     }
     if (!Modified) {
-      return std::make_tuple(nullptr, nullptr);
+      return std::tuple(nullptr, nullptr);
     }
 
     // Create function with new signature and clone the old body into it.
@@ -311,7 +311,7 @@ class R600OpenCLImageTypeLoweringPass : public ModulePass {
       KernelMDArgs.push_back(MDNode::get(*Context, MDV));
     MDNode *NewMDNode = MDNode::get(*Context, KernelMDArgs);
 
-    return std::make_tuple(NewF, NewMDNode);
+    return std::tuple(NewF, NewMDNode);
   }
 
   bool transformKernels(Module &M) {

diff --git a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index a1327f9e087b9..f232bc9b3852f 100644
--- a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -162,7 +162,7 @@ Value *SIAnnotateControlFlow::popSaved() {
 
 /// Push a BB and saved value to the control flow stack
 void SIAnnotateControlFlow::push(BasicBlock *BB, Value *Saved) {
-  Stack.push_back(std::make_pair(BB, Saved));
+  Stack.push_back(std::pair(BB, Saved));
 }
 
 /// Can the condition represented by this PHI node treated like

diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 01f5fb2081719..f38b5d6fa3cf4 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -196,7 +196,7 @@ getCopyRegClasses(const MachineInstr &Copy,
                                          ? MRI.getRegClass(DstReg)
                                          : TRI.getPhysRegClass(DstReg);
 
-  return std::make_pair(SrcRC, DstRC);
+  return std::pair(SrcRC, DstRC);
 }
 
 static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
@@ -949,8 +949,8 @@ bool SIFixSGPRCopies::needToBeConvertedToVALU(V2SCopyInfo *Info) {
         // the COPY has already been MoveToVALUed
         continue;
 
-      SrcRegs.insert(std::make_pair(SiblingCopy->getOperand(1).getReg(),
-                                    SiblingCopy->getOperand(1).getSubReg()));
+      SrcRegs.insert(std::pair(SiblingCopy->getOperand(1).getReg(),
+                               SiblingCopy->getOperand(1).getSubReg()));
     }
   }
   Info->SiblingPenalty = SrcRegs.size();

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 5f1a4c1b31d07..3820077f08dc9 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1457,7 +1457,7 @@ SIFoldOperands::isOMod(const MachineInstr &MI) const {
         ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F16_e64 ||
           Op == AMDGPU::V_MUL_F16_t16_e64) &&
          MFI->getMode().FP64FP16OutputDenormals))
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
     const MachineOperand *RegOp = nullptr;
     const MachineOperand *ImmOp = nullptr;
@@ -1470,7 +1470,7 @@ SIFoldOperands::isOMod(const MachineInstr &MI) const {
       ImmOp = Src1;
       RegOp = Src0;
     } else
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
     int OMod = getOModValue(Op, ImmOp->getImm());
     if (OMod == SIOutMods::NONE ||
@@ -1478,9 +1478,9 @@ SIFoldOperands::isOMod(const MachineInstr &MI) const {
         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
-    return std::make_pair(RegOp, OMod);
+    return std::pair(RegOp, OMod);
   }
   case AMDGPU::V_ADD_F64_e64:
   case AMDGPU::V_ADD_F32_e64:
@@ -1491,7 +1491,7 @@ SIFoldOperands::isOMod(const MachineInstr &MI) const {
         ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F16_e64 ||
           Op == AMDGPU::V_ADD_F16_t16_e64) &&
          MFI->getMode().FP64FP16OutputDenormals))
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
@@ -1503,12 +1503,12 @@ SIFoldOperands::isOMod(const MachineInstr &MI) const {
         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
-      return std::make_pair(Src0, SIOutMods::MUL2);
+      return std::pair(Src0, SIOutMods::MUL2);
 
-    return std::make_pair(nullptr, SIOutMods::NONE);
+    return std::pair(nullptr, SIOutMods::NONE);
   }
   default:
-    return std::make_pair(nullptr, SIOutMods::NONE);
+    return std::pair(nullptr, SIOutMods::NONE);
   }
 }
 

diff --git a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
index 471d70a95ffdc..56d456b29e84e 100644
--- a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -232,7 +232,7 @@ void SIFormMemoryClauses::collectRegUses(const MachineInstr &MI,
     auto Loc = Map.find(Reg);
     unsigned State = getMopState(MO);
     if (Loc == Map.end()) {
-      Map[Reg] = std::make_pair(State, Mask);
+      Map[Reg] = std::pair(State, Mask);
     } else {
       Loc->second.first |= State;
       Loc->second.second |= Mask;

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 524f51916be95..1fa17cae87535 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3240,7 +3240,7 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
     }
 
     if (VA.isRegLoc()) {
-      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+      RegsToPass.push_back(std::pair(VA.getLocReg(), Arg));
     } else {
       assert(VA.isMemLoc());
 
@@ -3551,7 +3551,7 @@ splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
 
   MBB.addSuccessor(LoopBB);
 
-  return std::make_pair(LoopBB, RemainderBB);
+  return std::pair(LoopBB, RemainderBB);
 }
 
 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
@@ -3769,9 +3769,9 @@ computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
   // Skip out of bounds offsets, or else we would end up using an undefined
   // register.
   if (Offset >= NumElts || Offset < 0)
-    return std::make_pair(AMDGPU::sub0, Offset);
+    return std::pair(AMDGPU::sub0, Offset);
 
-  return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
+  return std::pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
 }
 
 static void setM0ToIndexFromSGPR(const SIInstrInfo *TII,
@@ -4668,8 +4668,8 @@ SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
   SDValue Lo0, Hi0;
   SDValue Op0 = Op.getOperand(0);
   std::tie(Lo0, Hi0) = Op0.getValueType().isVector()
-                         ? DAG.SplitVectorOperand(Op.getNode(), 0)
-                         : std::make_pair(Op0, Op0);
+                           ? DAG.SplitVectorOperand(Op.getNode(), 0)
+                           : std::pair(Op0, Op0);
   SDValue Lo1, Hi1;
   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
   SDValue Lo2, Hi2;
@@ -12262,7 +12262,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
       default:
         RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth);
         if (!RC)
-          return std::make_pair(0U, nullptr);
+          return std::pair(0U, nullptr);
         break;
       }
       break;
@@ -12274,7 +12274,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
       default:
         RC = TRI->getVGPRClassForBitWidth(BitWidth);
         if (!RC)
-          return std::make_pair(0U, nullptr);
+          return std::pair(0U, nullptr);
         break;
       }
       break;
@@ -12288,7 +12288,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
       default:
         RC = TRI->getAGPRClassForBitWidth(BitWidth);
         if (!RC)
-          return std::make_pair(0U, nullptr);
+          return std::pair(0U, nullptr);
         break;
       }
       break;
@@ -12297,7 +12297,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
     // even if they are not reported as legal
     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
-      return std::make_pair(0U, RC);
+      return std::pair(0U, RC);
   }
 
   if (Constraint.startswith("{") && Constraint.endswith("}")) {
@@ -12329,13 +12329,13 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
             RC = TRI->getAGPRClassForBitWidth(Width);
           if (RC) {
             Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC);
-            return std::make_pair(Reg, RC);
+            return std::pair(Reg, RC);
           }
         }
       } else {
         bool Failed = RegName.getAsInteger(10, Idx);
         if (!Failed && Idx < RC->getNumRegs())
-          return std::make_pair(RC->getRegister(Idx), RC);
+          return std::pair(RC->getRegister(Idx), RC);
       }
     }
   }

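One subtlety in the std::pair(0U, nullptr) returns above: CTAD deduces
std::pair<unsigned, std::nullptr_t>, and pair's converting constructor
then produces the function's declared return type, exactly as
make_pair's result did before. A reduced sketch; the simplified
signature below is hypothetical, not LLVM's:

  #include <utility>

  struct TargetRegisterClass;  // stand-in; not the real LLVM class

  // Hypothetical reduction of getRegForInlineAsmConstraint's failure path.
  std::pair<unsigned, const TargetRegisterClass *> noMatch() {
    // Deduces std::pair<unsigned, std::nullptr_t>; the converting
    // constructor of the declared return type accepts it.
    return std::pair(0U, nullptr);
  }

  int main() { return noMatch().second == nullptr ? 0 : 1; }
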
diff  --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index c596603d7b5a6..00ec88ff0edf8 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1621,7 +1621,7 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
         // there cannot be a vector store to the same memory location.
         if (!Memop->isInvariant()) {
           const Value *Ptr = Memop->getValue();
-          SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
+          SLoadAddresses.insert(std::pair(Ptr, Inst.getParent()));
         }
       }
       if (ST->hasReadVCCZBug()) {

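The insert call above works because the deduced pair still converts to
the container's value_type. A standalone sketch, with std::map standing
in for the DenseMap used here:

  #include <map>
  #include <string>

  int main() {
    std::map<std::string, int> M;
    // CTAD deduces std::pair<const char *, int>; insert() converts it
    // to the map's value_type, std::pair<const std::string, int>.
    M.insert(std::pair("answer", 42));
    return M.at("answer") == 42 ? 0 : 1;
  }
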
diff  --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index c14b8df1f3902..3ba98128dd605 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2278,7 +2278,7 @@ SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
       AMDGPU::isLegal64BitDPPControl(
         getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl)->getImm())) {
     MI.setDesc(get(AMDGPU::V_MOV_B64_dpp));
-    return std::make_pair(&MI, nullptr);
+    return std::pair(&MI, nullptr);
   }
 
   MachineBasicBlock &MBB = *MI.getParent();
@@ -2331,7 +2331,7 @@ SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
       .addImm(AMDGPU::sub1);
 
   MI.eraseFromParent();
-  return std::make_pair(Split[0], Split[1]);
+  return std::pair(Split[0], Split[1]);
 }
 
 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
@@ -5815,7 +5815,7 @@ extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
       .addReg(SRsrcFormatHi)
       .addImm(AMDGPU::sub3);
 
-  return std::make_tuple(RsrcPtr, NewSRsrc);
+  return std::tuple(RsrcPtr, NewSRsrc);
 }
 
 MachineBasicBlock *
@@ -6591,10 +6591,10 @@ SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
     MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT);
 
     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
-    return std::make_pair(true, NewBB);
+    return std::pair(true, NewBB);
   }
 
-  return std::make_pair(false, nullptr);
+  return std::pair(false, nullptr);
 }
 
 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
@@ -7775,7 +7775,7 @@ SIInstrInfo::CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
 
 std::pair<unsigned, unsigned>
 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
-  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
+  return std::pair(TF & MO_MASK, TF & ~MO_MASK);
 }
 
 ArrayRef<std::pair<unsigned, const char *>>

diff  --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 18c1875201549..d881b0b0a775e 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -1819,7 +1819,7 @@ SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI,
     Idx1 = Idxs[CI.Width][Paired.Width - 1];
   }
 
-  return std::make_pair(Idx0, Idx1);
+  return std::pair(Idx0, Idx1);
 }
 
 const TargetRegisterClass *
@@ -2166,7 +2166,7 @@ bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
         MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
       continue;
 
-    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
+    InstsWCommonBase.push_back(std::pair(&MINext, MAddrNext.Offset));
 
     int64_t Dist = MAddr.Offset - MAddrNext.Offset;
     TargetLoweringBase::AddrMode AM;
@@ -2316,7 +2316,7 @@ SILoadStoreOptimizer::collectMergeableInsts(
     ++I;
   }
 
-  return std::make_pair(BlockI, Modified);
+  return std::pair(BlockI, Modified);
 }
 
 // Scan through looking for adjacent LDS operations with constant offsets from

diff  --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
index ff5587fbb0ca1..d200ad6df6fb6 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -548,7 +548,7 @@ void SIScheduleBlock::addSucc(SIScheduleBlock *Succ,
   }
   if (Succ->isHighLatencyBlock())
     ++NumHighLatencySuccessors;
-  Succs.push_back(std::make_pair(Succ, Kind));
+  Succs.push_back(std::pair(Succ, Kind));
 
   assert(none_of(Preds,
                  [=](SIScheduleBlock *P) { return SuccID == P->getID(); }) &&

diff  --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index eb6a9f9ef37ce..26884a5a6ab52 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -626,45 +626,33 @@ std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
 SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
                                SIAtomicAddrSpace InstrAddrSpace) const {
   if (SSID == SyncScope::System)
-    return std::make_tuple(SIAtomicScope::SYSTEM,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::SYSTEM, SIAtomicAddrSpace::ATOMIC, true);
   if (SSID == MMI->getAgentSSID())
-    return std::make_tuple(SIAtomicScope::AGENT,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::AGENT, SIAtomicAddrSpace::ATOMIC, true);
   if (SSID == MMI->getWorkgroupSSID())
-    return std::make_tuple(SIAtomicScope::WORKGROUP,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::WORKGROUP, SIAtomicAddrSpace::ATOMIC,
+                      true);
   if (SSID == MMI->getWavefrontSSID())
-    return std::make_tuple(SIAtomicScope::WAVEFRONT,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::WAVEFRONT, SIAtomicAddrSpace::ATOMIC,
+                      true);
   if (SSID == SyncScope::SingleThread)
-    return std::make_tuple(SIAtomicScope::SINGLETHREAD,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::SINGLETHREAD, SIAtomicAddrSpace::ATOMIC,
+                      true);
   if (SSID == MMI->getSystemOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::SYSTEM,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::SYSTEM,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getAgentOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::AGENT,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::AGENT,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getWorkgroupOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::WORKGROUP,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::WORKGROUP,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getWavefrontOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::WAVEFRONT,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::WAVEFRONT,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getSingleThreadOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::SINGLETHREAD,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::SINGLETHREAD,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   return std::nullopt;
 }
 

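In toSIAtomicScope above, each deduced std::tuple is wrapped into the
std::optional return type by optional's converting constructor, just as
the make_tuple results were. A reduced sketch with hypothetical
stand-in enums, not the real SIAtomicScope/SIAtomicAddrSpace types:

  #include <optional>
  #include <tuple>

  enum class Scope { System, Agent };  // stand-ins only
  enum class AddrSpace { Atomic };

  std::optional<std::tuple<Scope, AddrSpace, bool>> classify(bool IsSystem) {
    if (IsSystem)
      // CTAD yields std::tuple<Scope, AddrSpace, bool>, which the
      // optional converting constructor wraps on return.
      return std::tuple(Scope::System, AddrSpace::Atomic, true);
    return std::nullopt;
  }

  int main() { return classify(true) ? 0 : 1; }
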
diff  --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 6151eb0f5e6a6..fd6f3ad43dea2 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -712,7 +712,7 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
         if (!Op2Def)
           return CheckRetType(std::nullopt);
 
-        return CheckRetType(std::make_pair(Op1Def, Op2Def));
+        return CheckRetType(std::pair(Op1Def, Op2Def));
       };
 
     MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);

diff  --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
index 0a80779f8a5cd..d1775bb9ae2de 100644
--- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -107,10 +107,8 @@ struct PrintState {
 static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
 
   static const std::pair<char, const char *> Mapping[] = {
-      std::make_pair(StateWQM, "WQM"),
-      std::make_pair(StateStrictWWM, "StrictWWM"),
-      std::make_pair(StateStrictWQM, "StrictWQM"),
-      std::make_pair(StateExact, "Exact")};
+      std::pair(StateWQM, "WQM"), std::pair(StateStrictWWM, "StrictWWM"),
+      std::pair(StateStrictWQM, "StrictWQM"), std::pair(StateExact, "Exact")};
   char State = PS.State;
   for (auto M : Mapping) {
     if (State & M.first) {

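One behavioral difference worth remembering when making this kind of
substitution, though none of the call sites in these hunks hits it
(hence NFC): make_pair unwraps std::reference_wrapper, while CTAD keeps
it. A standalone sketch:

  #include <functional>
  #include <type_traits>
  #include <utility>

  int main() {
    int X = 0;
    // make_pair unwraps reference_wrapper into a plain reference...
    auto A = std::make_pair(std::ref(X), 1);
    static_assert(std::is_same_v<decltype(A), std::pair<int &, int>>);
    // ...while CTAD keeps the reference_wrapper itself.
    auto B = std::pair(std::ref(X), 1);
    static_assert(std::is_same_v<decltype(B),
                                 std::pair<std::reference_wrapper<int>, int>>);
    return A.second + B.second - 2;  // 0; keeps both variables used
  }
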
diff  --git a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
index 45eb6c3214769..419abab9b9d83 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
@@ -47,8 +47,8 @@ static StringMap<int> createIndexMap(const ArrayRef<StringRef> &names,
   StringMap<int> map;
   assert(names.size() == altNames.size());
   for (unsigned i = 0; i < names.size(); ++i) {
-    map.insert(std::make_pair(names[i], i));
-    map.insert(std::make_pair(altNames[i], i));
+    map.insert(std::pair(names[i], i));
+    map.insert(std::pair(altNames[i], i));
   }
   return map;
 }

More information about the llvm-commits mailing list