[llvm] 61f1f2a - AMDGPU/GlobalISel: Initial Implementation of calls

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 20 08:21:48 PDT 2020


Author: Matt Arsenault
Date: 2020-07-20T11:13:22-04:00
New Revision: 61f1f2a2042343501dd5cfc2e378298d6653150c

URL: https://github.com/llvm/llvm-project/commit/61f1f2a2042343501dd5cfc2e378298d6653150c
DIFF: https://github.com/llvm/llvm-project/commit/61f1f2a2042343501dd5cfc2e378298d6653150c.diff

LOG: AMDGPU/GlobalISel: Initial Implementation of calls

Return values and tail calls are not yet handled.
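
For example, with the fixed function ABI enabled, the IRTranslator can now
lower a direct call like the following (a minimal sketch mirroring the added
tests; the callee name is a placeholder). The new tests run this kind of
input through llc -global-isel -amdgpu-fixed-function-abi
-stop-after=irtranslator:

    declare hidden void @callee(i32)

    define amdgpu_kernel void @caller() {
      call void @callee(i32 42)
      ret void
    }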

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 05a4e3462a26..f010b4a5b52c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -15,26 +15,47 @@
 #include "AMDGPUCallLowering.h"
 #include "AMDGPU.h"
 #include "AMDGPUISelLowering.h"
+#include "AMDGPULegalizerInfo.h"
 #include "AMDGPUSubtarget.h"
 #include "AMDGPUTargetMachine.h"
+#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "SIISelLowering.h"
 #include "SIMachineFunctionInfo.h"
 #include "SIRegisterInfo.h"
-#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/Support/LowLevelTypeImpl.h"
 
+#define DEBUG_TYPE "amdgpu-call-lowering"
+
 using namespace llvm;
 
 namespace {
 
-struct OutgoingValueHandler : public CallLowering::ValueHandler {
+struct AMDGPUValueHandler : public CallLowering::ValueHandler {
+  AMDGPUValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                     CCAssignFn *AssignFn)
+    : ValueHandler(B, MRI, AssignFn) {}
+
+  /// Wrapper around extendRegister to ensure we extend to a full 32-bit
+  /// register.
+  Register extendRegisterMin32(Register ValVReg, CCValAssign &VA) {
+    if (VA.getLocVT().getSizeInBits() < 32) {
+      // 16-bit types are reported as legal for 32-bit registers. We need to
+      // extend and do a 32-bit copy to avoid the verifier complaining about it.
+      return MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
+    }
+
+    return extendRegister(ValVReg, VA);
+  }
+};
+
+struct OutgoingValueHandler : public AMDGPUValueHandler {
   OutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                        MachineInstrBuilder MIB, CCAssignFn *AssignFn)
-      : ValueHandler(B, MRI, AssignFn), MIB(MIB) {}
+      : AMDGPUValueHandler(B, MRI, AssignFn), MIB(MIB) {}
 
   MachineInstrBuilder MIB;
 
@@ -52,13 +73,7 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
 
   void assignValueToReg(Register ValVReg, Register PhysReg,
                         CCValAssign &VA) override {
-    Register ExtReg;
-    if (VA.getLocVT().getSizeInBits() < 32) {
-      // 16-bit types are reported as legal for 32-bit registers. We need to
-      // extend and do a 32-bit copy to avoid the verifier complaining about it.
-      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
-    } else
-      ExtReg = extendRegister(ValVReg, VA);
+    Register ExtReg = extendRegisterMin32(ValVReg, VA);
 
     // If this is a scalar return, insert a readfirstlane just in case the value
     // ends up in a VGPR.
@@ -85,12 +100,12 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
   }
 };
 
-struct IncomingArgHandler : public CallLowering::ValueHandler {
+struct IncomingArgHandler : public AMDGPUValueHandler {
   uint64_t StackUsed = 0;
 
   IncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                      CCAssignFn *AssignFn)
-    : ValueHandler(B, MRI, AssignFn) {}
+    : AMDGPUValueHandler(B, MRI, AssignFn) {}
 
   Register getStackAddress(uint64_t Size, int64_t Offset,
                            MachinePointerInfo &MPO) override {
@@ -159,6 +174,97 @@ struct FormalArgHandler : public IncomingArgHandler {
   }
 };
 
+struct CallReturnHandler : public IncomingArgHandler {
+  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
+    : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
+
+  void markPhysRegUsed(unsigned PhysReg) override {
+    MIB.addDef(PhysReg, RegState::Implicit);
+  }
+
+  MachineInstrBuilder MIB;
+};
+
+struct OutgoingArgHandler : public AMDGPUValueHandler {
+  MachineInstrBuilder MIB;
+  CCAssignFn *AssignFnVarArg;
+
+  /// For tail calls, the byte offset of the call's argument area from the
+  /// callee's. Unused elsewhere.
+  int FPDiff;
+
+  // Cache the SP register vreg if we need it more than once in this call site.
+  Register SPReg;
+
+  bool IsTailCall;
+
+  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                     MachineInstrBuilder MIB, CCAssignFn *AssignFn,
+                     CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
+                     int FPDiff = 0)
+      : AMDGPUValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
+        AssignFnVarArg(AssignFnVarArg),
+        FPDiff(FPDiff), IsTailCall(IsTailCall) {}
+
+  bool isIncomingArgumentHandler() const override { return false; }
+
+  Register getStackAddress(uint64_t Size, int64_t Offset,
+                           MachinePointerInfo &MPO) override {
+    MachineFunction &MF = MIRBuilder.getMF();
+    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
+    const LLT S32 = LLT::scalar(32);
+
+    if (IsTailCall) {
+      llvm_unreachable("implement me");
+    }
+
+    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+
+    if (!SPReg)
+      SPReg = MIRBuilder.buildCopy(PtrTy, MFI->getStackPtrOffsetReg()).getReg(0);
+
+    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);
+
+    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
+    MPO = MachinePointerInfo::getStack(MF, Offset);
+    return AddrReg.getReg(0);
+  }
+
+  void assignValueToReg(Register ValVReg, Register PhysReg,
+                        CCValAssign &VA) override {
+    MIB.addUse(PhysReg, RegState::Implicit);
+    Register ExtReg = extendRegisterMin32(ValVReg, VA);
+    MIRBuilder.buildCopy(PhysReg, ExtReg);
+  }
+
+  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
+                            MachinePointerInfo &MPO, CCValAssign &VA) override {
+    MachineFunction &MF = MIRBuilder.getMF();
+    uint64_t LocMemOffset = VA.getLocMemOffset();
+    const auto &ST = MF.getSubtarget<GCNSubtarget>();
+
+    auto MMO = MF.getMachineMemOperand(
+      MPO, MachineMemOperand::MOStore, Size,
+      commonAlignment(ST.getStackAlignment(), LocMemOffset));
+    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
+  }
+
+  void assignValueToAddress(const CallLowering::ArgInfo &Arg, Register Addr,
+                            uint64_t Size, MachinePointerInfo &MPO,
+                            CCValAssign &VA) override {
+    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
+                           ? extendRegister(Arg.Regs[0], VA)
+                           : Arg.Regs[0];
+
+    // If we extended, we might need to adjust the MMO's size.
+    const LLT RegTy = MRI.getType(ValVReg);
+    if (RegTy.getSizeInBytes() > Size)
+      Size = RegTy.getSizeInBytes();
+
+    assignValueToAddress(ValVReg, Addr, Size, MPO, VA);
+  }
+};
 }
 
 AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
@@ -181,9 +287,10 @@ static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
 
 void AMDGPUCallLowering::splitToValueTypes(
   MachineIRBuilder &B,
-  const ArgInfo &OrigArg, unsigned OrigArgIdx,
+  const ArgInfo &OrigArg,
   SmallVectorImpl<ArgInfo> &SplitArgs,
   const DataLayout &DL, CallingConv::ID CallConv,
+  bool IsOutgoing,
   SplitArgTy PerformArgSplit) const {
   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
   LLVMContext &Ctx = OrigArg.Ty->getContext();
@@ -202,7 +309,7 @@ void AMDGPUCallLowering::splitToValueTypes(
     Type *Ty = VT.getTypeForEVT(Ctx);
     LLT LLTy = getLLTForType(*Ty, DL);
 
-    if (OrigArgIdx == AttributeList::ReturnIndex && VT.isScalarInteger()) {
+    if (IsOutgoing && VT.isScalarInteger()) {
       unsigned ExtendOp = TargetOpcode::G_ANYEXT;
       if (OrigArg.Flags[0].isSExt()) {
         assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
@@ -214,7 +321,7 @@ void AMDGPUCallLowering::splitToValueTypes(
 
       EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                           extOpcodeToISDExtOpcode(ExtendOp));
-      if (ExtVT != VT) {
+      if (ExtVT.getSizeInBits() != VT.getSizeInBits()) {
         VT = ExtVT;
         Ty = ExtVT.getTypeForEVT(Ctx);
         LLTy = getLLTForType(*Ty, DL);
@@ -323,7 +430,7 @@ bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
   SmallVector<ArgInfo, 4> SplitRetInfos;
 
   splitToValueTypes(
-    B, OrigRetInfo, AttributeList::ReturnIndex, SplitRetInfos, DL, CC,
+    B, OrigRetInfo, SplitRetInfos, DL, CC, true,
     [&](ArrayRef<Register> Regs, Register SrcReg, LLT LLTy, LLT PartLLT,
         int VTSplitIdx) {
       unpackRegsToOrigType(B, Regs, SrcReg,
@@ -730,7 +837,7 @@ bool AMDGPUCallLowering::lowerFormalArguments(
     setArgFlags(OrigArg, OrigArgIdx, DL, F);
 
     splitToValueTypes(
-      B, OrigArg, OrigArgIdx, SplitArgs, DL, CC,
+      B, OrigArg, SplitArgs, DL, CC, false,
       // FIXME: We should probably be passing multiple registers to
       // handleAssignments to do this
       [&](ArrayRef<Register> Regs, Register DstReg,
@@ -818,3 +925,328 @@ bool AMDGPUCallLowering::lowerFormalArguments(
 
   return true;
 }
+
+bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
+                                           CCState &CCInfo,
+                                           SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
+                                           CallLoweringInfo &Info) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+
+  const AMDGPUFunctionArgInfo *CalleeArgInfo
+    = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
+
+  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();
+
+
+  // TODO: Unify with private memory register handling. This is complicated by
+  // the fact that at least in kernels, the input argument is not necessarily
+  // in the same location as the input.
+  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
+    AMDGPUFunctionArgInfo::DISPATCH_PTR,
+    AMDGPUFunctionArgInfo::QUEUE_PTR,
+    AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
+    AMDGPUFunctionArgInfo::DISPATCH_ID,
+    AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
+    AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
+    AMDGPUFunctionArgInfo::WORKGROUP_ID_Z
+  };
+
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const AMDGPULegalizerInfo *LI
+    = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());
+
+  for (auto InputID : InputRegs) {
+    const ArgDescriptor *OutgoingArg;
+    const TargetRegisterClass *ArgRC;
+    LLT ArgTy;
+
+    std::tie(OutgoingArg, ArgRC, ArgTy) =
+        CalleeArgInfo->getPreloadedValue(InputID);
+    if (!OutgoingArg)
+      continue;
+
+    const ArgDescriptor *IncomingArg;
+    const TargetRegisterClass *IncomingArgRC;
+    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
+        CallerArgInfo.getPreloadedValue(InputID);
+    assert(IncomingArgRC == ArgRC);
+
+    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);
+
+    if (IncomingArg) {
+      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg);
+    } else {
+      assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
+      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
+    }
+
+    if (OutgoingArg->isRegister()) {
+      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
+      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
+        report_fatal_error("failed to allocate implicit input argument");
+    } else {
+      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
+      return false;
+    }
+  }
+
+  // Pack workitem IDs into a single register, or pass them as-is if already
+  // packed.
+  const ArgDescriptor *OutgoingArg;
+  const TargetRegisterClass *ArgRC;
+  LLT ArgTy;
+
+  std::tie(OutgoingArg, ArgRC, ArgTy) =
+      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
+  if (!OutgoingArg)
+    std::tie(OutgoingArg, ArgRC, ArgTy) =
+        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
+  if (!OutgoingArg)
+    std::tie(OutgoingArg, ArgRC, ArgTy) =
+        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
+  if (!OutgoingArg)
+    return false;
+
+  const ArgDescriptor *IncomingArgX = std::get<0>(
+      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X));
+  const ArgDescriptor *IncomingArgY = std::get<0>(
+      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y));
+  const ArgDescriptor *IncomingArgZ = std::get<0>(
+      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z));
+
+  const LLT S32 = LLT::scalar(32);
+
+  // If the incoming IDs are not packed, we need to pack them.
+  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
+  Register InputReg;
+  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX) {
+    InputReg = MRI.createGenericVirtualRegister(S32);
+    LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX);
+  }
+
+  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY) {
+    Register Y = MRI.createGenericVirtualRegister(S32);
+    LI->loadInputValue(Y, MIRBuilder, IncomingArgY);
+
+    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
+    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
+  }
+
+  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ) {
+    Register Z = MRI.createGenericVirtualRegister(S32);
+    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ);
+
+    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
+    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
+  }
+
+  if (!InputReg) {
+    InputReg = MRI.createGenericVirtualRegister(S32);
+
+    // Workitem IDs are already packed; any present incoming argument will
+    // carry all required fields.
+    ArgDescriptor IncomingArg = ArgDescriptor::createArg(
+      IncomingArgX ? *IncomingArgX :
+        IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
+    LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg);
+  }
+
+  if (OutgoingArg->isRegister()) {
+    ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
+    if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
+      report_fatal_error("failed to allocate implicit input argument");
+  } else {
+    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
+    return false;
+  }
+
+  return true;
+}
+
+/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
+/// CC.
+static std::pair<CCAssignFn *, CCAssignFn *>
+getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
+  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
+}
+
+static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
+                              bool IsTailCall) {
+  return AMDGPU::SI_CALL;
+}
+
+// Add operands to the call instruction to track the callee.
+static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
+                                  MachineIRBuilder &MIRBuilder,
+                                  AMDGPUCallLowering::CallLoweringInfo &Info) {
+  if (Info.Callee.isReg()) {
+    CallInst.addImm(0);
+    CallInst.add(Info.Callee);
+  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
+    // The call lowering lightly assumed we can directly encode a call target in
+    // the instruction, which is not the case. Materialize the address here.
+    const GlobalValue *GV = Info.Callee.getGlobal();
+    auto Ptr = MIRBuilder.buildGlobalValue(
+      LLT::pointer(GV->getAddressSpace(), 64), GV);
+    CallInst.addReg(Ptr.getReg(0));
+    CallInst.add(Info.Callee);
+  } else
+    return false;
+
+  return true;
+}
+
+bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
+                                   CallLoweringInfo &Info) const {
+  if (!AMDGPUTargetMachine::EnableFixedFunctionABI) {
+    LLVM_DEBUG(dbgs() << "Variable function ABI not implemented\n");
+    return false;
+  }
+
+  if (Info.IsVarArg) {
+    LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
+    return false;
+  }
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+
+  const Function &F = MF.getFunction();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
+  const DataLayout &DL = F.getParent()->getDataLayout();
+
+  if (AMDGPU::isShader(F.getCallingConv())) {
+    LLVM_DEBUG(dbgs() << "Unhandled call from graphics shader\n");
+    return false;
+  }
+
+  SmallVector<ArgInfo, 8> OutArgs;
+  SmallVector<ArgInfo, 4> SplitRetInfos;
+
+  for (auto &OrigArg : Info.OrigArgs) {
+    splitToValueTypes(
+      MIRBuilder, OrigArg, OutArgs, DL, Info.CallConv, true,
+      // FIXME: We should probably be passing multiple registers to
+      // handleAssignments to do this
+      [&](ArrayRef<Register> Regs, Register SrcReg, LLT LLTy, LLT PartLLT,
+          int VTSplitIdx) {
+        unpackRegsToOrigType(MIRBuilder, Regs, SrcReg, OrigArg, LLTy, PartLLT);
+      });
+  }
+
+  SmallVector<ArgInfo, 8> InArgs;
+  if (!Info.OrigRet.Ty->isVoidTy()) {
+    LLVM_DEBUG(dbgs() << "Call return values not yet handled\n");
+    return false;
+  }
+
+  // If we can lower as a tail call, do that instead.
+  bool CanTailCallOpt = false;
+
+  // We must emit a tail call if we have musttail.
+  if (Info.IsMustTailCall && !CanTailCallOpt) {
+    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
+    return false;
+  }
+
+  // Find out which ABI gets to decide where things go.
+  CCAssignFn *AssignFnFixed;
+  CCAssignFn *AssignFnVarArg;
+  std::tie(AssignFnFixed, AssignFnVarArg) =
+      getAssignFnsForCC(Info.CallConv, TLI);
+
+  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
+    .addImm(0)
+    .addImm(0);
+
+  // Create a temporarily-floating call instruction so we can add the implicit
+  // uses of arg registers.
+  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);
+
+  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
+  MIB.addDef(TRI->getReturnAddressReg(MF));
+
+  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
+    return false;
+
+  // Tell the call which registers are clobbered.
+  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
+  MIB.addRegMask(Mask);
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
+
+  // We could pass MIB and directly add the implicit uses to the call
+  // now. However, as an aesthetic choice, place implicit argument operands
+  // after the ordinary user argument registers.
+  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
+
+  if (AMDGPUTargetMachine::EnableFixedFunctionABI) {
+    // With a fixed ABI, allocate fixed registers before user arguments.
+    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
+      return false;
+  }
+
+  // Do the actual argument marshalling.
+  SmallVector<Register, 8> PhysRegs;
+  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
+                             AssignFnVarArg, false);
+  if (!handleAssignments(CCInfo, ArgLocs, MIRBuilder, OutArgs, Handler))
+    return false;
+
+  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+
+  // Insert copies for the SRD. In the HSA case, this should be an identity
+  // copy.
+  auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::vector(4, 32),
+                                             MFI->getScratchRSrcReg());
+  MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
+  MIB.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
+
+  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
+    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
+    MIB.addReg(ArgReg.first, RegState::Implicit);
+  }
+
+  // Get a count of how many bytes are to be pushed on the stack.
+  unsigned NumBytes = CCInfo.getNextStackOffset();
+
+  // Now we can add the actual call instruction to the correct position.
+  MIRBuilder.insertInstr(MIB);
+
+  // If Callee is a reg, since it is used by a target specific
+  // instruction, it must have a register class matching the
+  // constraint of that instruction.
+
+  // FIXME: We should define regbankselectable call instructions to handle
+  // divergent call targets.
+  if (MIB->getOperand(1).isReg()) {
+    MIB->getOperand(1).setReg(constrainOperandRegClass(
+        MF, *TRI, MRI, *ST.getInstrInfo(),
+        *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
+        1));
+  }
+
+  // Finally we can copy the returned value back into its virtual-register. In
+  // symmetry with the arguments, the physical register must be an
+  // implicit-define of the call instruction.
+  if (!Info.OrigRet.Ty->isVoidTy()) {
+    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
+                                                      Info.IsVarArg);
+    CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
+    if (!handleAssignments(MIRBuilder, InArgs, Handler))
+      return false;
+  }
+
+  uint64_t CalleePopBytes = NumBytes;
+  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
+    .addImm(0)
+    .addImm(CalleePopBytes);
+
+  return true;
+}
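
For reference, the workitem-ID packing emitted by passSpecialInputs() above
places X in bits [9:0], Y in bits [19:10], and Z in bits [29:20] of a single
32-bit value. An IR-level sketch of the same computation (the lowering
itself builds G_SHL/G_OR in MIR, and the %id.* names are hypothetical):

    %y.shl = shl i32 %id.y, 10
    %z.shl = shl i32 %id.z, 20
    %xy = or i32 %id.x, %y.shl
    %packed = or i32 %xy, %z.shl    ; copied into $vgpr31 at the call site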

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
index 446619d1502e..4aaf0dc55f41 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
@@ -34,10 +34,10 @@ class AMDGPUCallLowering: public CallLowering {
 
   void splitToValueTypes(MachineIRBuilder &B,
                          const ArgInfo &OrigArgInfo,
-                         unsigned OrigArgIdx,
                          SmallVectorImpl<ArgInfo> &SplitArgs,
                          const DataLayout &DL,
                          CallingConv::ID CallConv,
+                         bool IsOutgoing,
                          SplitArgTy SplitArg) const;
 
   bool lowerReturnVal(MachineIRBuilder &B, const Value *Val,
@@ -54,6 +54,15 @@ class AMDGPUCallLowering: public CallLowering {
 
   bool lowerFormalArguments(MachineIRBuilder &B, const Function &F,
                             ArrayRef<ArrayRef<Register>> VRegs) const override;
+
+  bool passSpecialInputs(MachineIRBuilder &MIRBuilder,
+                         CCState &CCInfo,
+                         SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
+                         CallLoweringInfo &Info) const;
+
+  bool lowerCall(MachineIRBuilder &MIRBuilder,
+                 CallLoweringInfo &Info) const override;
+
   static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
   static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);
 };

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 99b040e7b49d..97d8cb01e7b5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3112,19 +3112,13 @@ bool AMDGPULegalizerInfo::legalizeFDIVFastIntrin(MachineInstr &MI,
   return true;
 }
 
-bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
-                                                 MachineRegisterInfo &MRI,
-                                                 MachineIRBuilder &B) const {
+bool AMDGPULegalizerInfo::getImplicitArgPtr(Register DstReg,
+                                            MachineRegisterInfo &MRI,
+                                            MachineIRBuilder &B) const {
   const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
-  if (!MFI->isEntryFunction()) {
-    return legalizePreloadedArgIntrin(MI, MRI, B,
-                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
-  }
-
   uint64_t Offset =
     ST.getTargetLowering()->getImplicitParameterOffset(
       B.getMF(), AMDGPUTargetLowering::FIRST_IMPLICIT);
-  Register DstReg = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);
   LLT IdxTy = LLT::scalar(DstTy.getSizeInBits());
 
@@ -3140,7 +3134,24 @@ bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
   if (!loadInputValue(KernargPtrReg, B, Arg))
     return false;
 
+  // FIXME: This should be nuw
   B.buildPtrAdd(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
+  return true;
+}
+
+bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
+                                                 MachineRegisterInfo &MRI,
+                                                 MachineIRBuilder &B) const {
+  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
+  if (!MFI->isEntryFunction()) {
+    return legalizePreloadedArgIntrin(MI, MRI, B,
+                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
+  }
+
+  Register DstReg = MI.getOperand(0).getReg();
+  if (!getImplicitArgPtr(DstReg, MRI, B))
+    return false;
+
   MI.eraseFromParent();
   return true;
 }
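
getImplicitArgPtr() is factored out of legalizeImplicitArgPtr() so the call
lowering can materialize a callee's implicit argument pointer without a
MachineInstr to consume. In an entry function this is just the kernarg
segment pointer plus the implicit parameter offset; roughly, in IR terms
(a sketch; the offset 56 is only an assumed example value, the real one
comes from getImplicitParameterOffset()):

    declare i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()

    %kernarg = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
    %implicitarg = getelementptr i8, i8 addrspace(4)* %kernarg, i64 56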

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index ce32bbf76b34..d932cab4659a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -133,6 +133,9 @@ class AMDGPULegalizerInfo : public LegalizerInfo {
   bool legalizeFDIVFastIntrin(MachineInstr &MI, MachineRegisterInfo &MRI,
                               MachineIRBuilder &B) const;
 
+  bool getImplicitArgPtr(Register DstReg, MachineRegisterInfo &MRI,
+                         MachineIRBuilder &B) const;
+
   bool legalizeImplicitArgPtr(MachineInstr &MI, MachineRegisterInfo &MRI,
                               MachineIRBuilder &B) const;
   bool legalizeIsAddrSpace(MachineInstr &MI, MachineRegisterInfo &MRI,

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
new file mode 100644
index 000000000000..aa0850a5dbe5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
@@ -0,0 +1,1247 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -amdgpu-fixed-function-abi -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope -check-prefix=GFX900 %s
+; RUN: llc -global-isel -amdgpu-fixed-function-abi -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope -check-prefix=GFX908 %s
+
+; Workitem IDs are passed to the kernel differently for gfx908
+
+declare hidden void @external_void_func_void() #0
+declare hidden void @external_void_func_i32(i32) #0
+declare hidden void @external_void_func_v32i32(<32 x i32>) #0
+
+define amdgpu_kernel void @test_call_external_void_func_i32([17 x i8]) #0 {
+  ; GFX900-LABEL: name: test_call_external_void_func_i32
+  ; GFX900: bb.1 (%ir-block.1):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX900:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_call_external_void_func_i32
+  ; GFX908: bb.1 (%ir-block.1):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX908:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
+define void @test_func_call_external_void_func_i32() #0 {
+  ; GFX900-LABEL: name: test_func_call_external_void_func_i32
+  ; GFX900: bb.1 (%ir-block.0):
+  ; GFX900:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX900:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; GFX900:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 99
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY13]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY15]](s32)
+  ; GFX900:   $vgpr31 = COPY [[COPY16]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
+  ; GFX900:   S_SETPC_B64_return [[COPY18]]
+  ; GFX908-LABEL: name: test_func_call_external_void_func_i32
+  ; GFX908: bb.1 (%ir-block.0):
+  ; GFX908:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX908:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; GFX908:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 99
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY13]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY15]](s32)
+  ; GFX908:   $vgpr31 = COPY [[COPY16]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
+  ; GFX908:   S_SETPC_B64_return [[COPY18]]
+  call void @external_void_func_i32(i32 99)
+  ret void
+}
+
+; Explicit argument is split between registers and the stack due to v31
+; being used for workitem IDs.
+define amdgpu_kernel void @test_call_external_void_func_v32i32([17 x i8]) #0 {
+  ; GFX900-LABEL: name: test_call_external_void_func_v32i32
+  ; GFX900: bb.1 (%ir-block.1):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX900:   [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+  ; GFX900:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; GFX900:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<32 x s32>)
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v32i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX900:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX900:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX900:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX900:   $vgpr4 = COPY [[UV4]](s32)
+  ; GFX900:   $vgpr5 = COPY [[UV5]](s32)
+  ; GFX900:   $vgpr6 = COPY [[UV6]](s32)
+  ; GFX900:   $vgpr7 = COPY [[UV7]](s32)
+  ; GFX900:   $vgpr8 = COPY [[UV8]](s32)
+  ; GFX900:   $vgpr9 = COPY [[UV9]](s32)
+  ; GFX900:   $vgpr10 = COPY [[UV10]](s32)
+  ; GFX900:   $vgpr11 = COPY [[UV11]](s32)
+  ; GFX900:   $vgpr12 = COPY [[UV12]](s32)
+  ; GFX900:   $vgpr13 = COPY [[UV13]](s32)
+  ; GFX900:   $vgpr14 = COPY [[UV14]](s32)
+  ; GFX900:   $vgpr15 = COPY [[UV15]](s32)
+  ; GFX900:   $vgpr16 = COPY [[UV16]](s32)
+  ; GFX900:   $vgpr17 = COPY [[UV17]](s32)
+  ; GFX900:   $vgpr18 = COPY [[UV18]](s32)
+  ; GFX900:   $vgpr19 = COPY [[UV19]](s32)
+  ; GFX900:   $vgpr20 = COPY [[UV20]](s32)
+  ; GFX900:   $vgpr21 = COPY [[UV21]](s32)
+  ; GFX900:   $vgpr22 = COPY [[UV22]](s32)
+  ; GFX900:   $vgpr23 = COPY [[UV23]](s32)
+  ; GFX900:   $vgpr24 = COPY [[UV24]](s32)
+  ; GFX900:   $vgpr25 = COPY [[UV25]](s32)
+  ; GFX900:   $vgpr26 = COPY [[UV26]](s32)
+  ; GFX900:   $vgpr27 = COPY [[UV27]](s32)
+  ; GFX900:   $vgpr28 = COPY [[UV28]](s32)
+  ; GFX900:   $vgpr29 = COPY [[UV29]](s32)
+  ; GFX900:   $vgpr30 = COPY [[UV30]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(p5) = COPY $sp_reg
+  ; GFX900:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX900:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C4]](s32)
+  ; GFX900:   G_STORE [[UV31]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; GFX900:   [[COPY21:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY21]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_call_external_void_func_v32i32
+  ; GFX908: bb.1 (%ir-block.1):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX908:   [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+  ; GFX908:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; GFX908:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<32 x s32>)
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v32i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX908:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX908:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX908:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX908:   $vgpr4 = COPY [[UV4]](s32)
+  ; GFX908:   $vgpr5 = COPY [[UV5]](s32)
+  ; GFX908:   $vgpr6 = COPY [[UV6]](s32)
+  ; GFX908:   $vgpr7 = COPY [[UV7]](s32)
+  ; GFX908:   $vgpr8 = COPY [[UV8]](s32)
+  ; GFX908:   $vgpr9 = COPY [[UV9]](s32)
+  ; GFX908:   $vgpr10 = COPY [[UV10]](s32)
+  ; GFX908:   $vgpr11 = COPY [[UV11]](s32)
+  ; GFX908:   $vgpr12 = COPY [[UV12]](s32)
+  ; GFX908:   $vgpr13 = COPY [[UV13]](s32)
+  ; GFX908:   $vgpr14 = COPY [[UV14]](s32)
+  ; GFX908:   $vgpr15 = COPY [[UV15]](s32)
+  ; GFX908:   $vgpr16 = COPY [[UV16]](s32)
+  ; GFX908:   $vgpr17 = COPY [[UV17]](s32)
+  ; GFX908:   $vgpr18 = COPY [[UV18]](s32)
+  ; GFX908:   $vgpr19 = COPY [[UV19]](s32)
+  ; GFX908:   $vgpr20 = COPY [[UV20]](s32)
+  ; GFX908:   $vgpr21 = COPY [[UV21]](s32)
+  ; GFX908:   $vgpr22 = COPY [[UV22]](s32)
+  ; GFX908:   $vgpr23 = COPY [[UV23]](s32)
+  ; GFX908:   $vgpr24 = COPY [[UV24]](s32)
+  ; GFX908:   $vgpr25 = COPY [[UV25]](s32)
+  ; GFX908:   $vgpr26 = COPY [[UV26]](s32)
+  ; GFX908:   $vgpr27 = COPY [[UV27]](s32)
+  ; GFX908:   $vgpr28 = COPY [[UV28]](s32)
+  ; GFX908:   $vgpr29 = COPY [[UV29]](s32)
+  ; GFX908:   $vgpr30 = COPY [[UV30]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(p5) = COPY $sp_reg
+  ; GFX908:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX908:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C4]](s32)
+  ; GFX908:   G_STORE [[UV31]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; GFX908:   [[COPY21:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY21]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_v32i32(<32 x i32> zeroinitializer)
+  ret void
+}
+
+define void @test_func_call_external_void_func_v32i32([17 x i8]) #0 {
+  ; GFX900-LABEL: name: test_func_call_external_void_func_v32i32
+  ; GFX900: bb.1 (%ir-block.1):
+  ; GFX900:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX900:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; GFX900:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX900:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
+  ; GFX900:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC]](s16)
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX900:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX900:   [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX900:   [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY10]](s32)
+  ; GFX900:   [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC4]](s16)
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX900:   [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY11]](s32)
+  ; GFX900:   [[TRUNC7:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC6]](s16)
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX900:   [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
+  ; GFX900:   [[TRUNC9:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC8]](s16)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX900:   [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
+  ; GFX900:   [[TRUNC11:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC10]](s16)
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX900:   [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
+  ; GFX900:   [[TRUNC13:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC12]](s16)
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr7
+  ; GFX900:   [[TRUNC14:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
+  ; GFX900:   [[TRUNC15:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC14]](s16)
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr8
+  ; GFX900:   [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
+  ; GFX900:   [[TRUNC17:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC16]](s16)
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr9
+  ; GFX900:   [[TRUNC18:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
+  ; GFX900:   [[TRUNC19:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC18]](s16)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr10
+  ; GFX900:   [[TRUNC20:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
+  ; GFX900:   [[TRUNC21:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC20]](s16)
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr11
+  ; GFX900:   [[TRUNC22:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
+  ; GFX900:   [[TRUNC23:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC22]](s16)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr12
+  ; GFX900:   [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
+  ; GFX900:   [[TRUNC25:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC24]](s16)
+  ; GFX900:   [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr13
+  ; GFX900:   [[TRUNC26:%[0-9]+]]:_(s16) = G_TRUNC [[COPY21]](s32)
+  ; GFX900:   [[TRUNC27:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC26]](s16)
+  ; GFX900:   [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr14
+  ; GFX900:   [[TRUNC28:%[0-9]+]]:_(s16) = G_TRUNC [[COPY22]](s32)
+  ; GFX900:   [[TRUNC29:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC28]](s16)
+  ; GFX900:   [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr15
+  ; GFX900:   [[TRUNC30:%[0-9]+]]:_(s16) = G_TRUNC [[COPY23]](s32)
+  ; GFX900:   [[TRUNC31:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC30]](s16)
+  ; GFX900:   [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr16
+  ; GFX900:   [[TRUNC32:%[0-9]+]]:_(s16) = G_TRUNC [[COPY24]](s32)
+  ; GFX900:   [[TRUNC33:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC32]](s16)
+  ; GFX900:   [[COPY25:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX900:   [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+  ; GFX900:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<32 x s32>)
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v32i32
+  ; GFX900:   [[COPY26:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY27:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX900:   [[COPY28:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX900:   [[COPY29:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX900:   [[COPY30:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY31:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX900:   [[COPY32:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX900:   [[COPY33:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX900:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX900:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX900:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX900:   $vgpr4 = COPY [[UV4]](s32)
+  ; GFX900:   $vgpr5 = COPY [[UV5]](s32)
+  ; GFX900:   $vgpr6 = COPY [[UV6]](s32)
+  ; GFX900:   $vgpr7 = COPY [[UV7]](s32)
+  ; GFX900:   $vgpr8 = COPY [[UV8]](s32)
+  ; GFX900:   $vgpr9 = COPY [[UV9]](s32)
+  ; GFX900:   $vgpr10 = COPY [[UV10]](s32)
+  ; GFX900:   $vgpr11 = COPY [[UV11]](s32)
+  ; GFX900:   $vgpr12 = COPY [[UV12]](s32)
+  ; GFX900:   $vgpr13 = COPY [[UV13]](s32)
+  ; GFX900:   $vgpr14 = COPY [[UV14]](s32)
+  ; GFX900:   $vgpr15 = COPY [[UV15]](s32)
+  ; GFX900:   $vgpr16 = COPY [[UV16]](s32)
+  ; GFX900:   $vgpr17 = COPY [[UV17]](s32)
+  ; GFX900:   $vgpr18 = COPY [[UV18]](s32)
+  ; GFX900:   $vgpr19 = COPY [[UV19]](s32)
+  ; GFX900:   $vgpr20 = COPY [[UV20]](s32)
+  ; GFX900:   $vgpr21 = COPY [[UV21]](s32)
+  ; GFX900:   $vgpr22 = COPY [[UV22]](s32)
+  ; GFX900:   $vgpr23 = COPY [[UV23]](s32)
+  ; GFX900:   $vgpr24 = COPY [[UV24]](s32)
+  ; GFX900:   $vgpr25 = COPY [[UV25]](s32)
+  ; GFX900:   $vgpr26 = COPY [[UV26]](s32)
+  ; GFX900:   $vgpr27 = COPY [[UV27]](s32)
+  ; GFX900:   $vgpr28 = COPY [[UV28]](s32)
+  ; GFX900:   $vgpr29 = COPY [[UV29]](s32)
+  ; GFX900:   $vgpr30 = COPY [[UV30]](s32)
+  ; GFX900:   [[COPY34:%[0-9]+]]:_(p5) = COPY $sgpr32
+  ; GFX900:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY34]], [[C1]](s32)
+  ; GFX900:   G_STORE [[UV31]](s32), [[PTR_ADD]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; GFX900:   [[COPY35:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY35]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY26]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY27]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[COPY28]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY29]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY30]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY31]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY32]](s32)
+  ; GFX900:   $vgpr31 = COPY [[COPY33]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
+  ; GFX900:   [[COPY36:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY25]]
+  ; GFX900:   S_SETPC_B64_return [[COPY36]]
+  ; GFX908-LABEL: name: test_func_call_external_void_func_v32i32
+  ; GFX908: bb.1 (%ir-block.1):
+  ; GFX908:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX908:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; GFX908:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX908:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
+  ; GFX908:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC]](s16)
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX908:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX908:   [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX908:   [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY10]](s32)
+  ; GFX908:   [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC4]](s16)
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX908:   [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY11]](s32)
+  ; GFX908:   [[TRUNC7:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC6]](s16)
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX908:   [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY12]](s32)
+  ; GFX908:   [[TRUNC9:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC8]](s16)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX908:   [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[COPY13]](s32)
+  ; GFX908:   [[TRUNC11:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC10]](s16)
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX908:   [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[COPY14]](s32)
+  ; GFX908:   [[TRUNC13:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC12]](s16)
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr7
+  ; GFX908:   [[TRUNC14:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
+  ; GFX908:   [[TRUNC15:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC14]](s16)
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr8
+  ; GFX908:   [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[COPY16]](s32)
+  ; GFX908:   [[TRUNC17:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC16]](s16)
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr9
+  ; GFX908:   [[TRUNC18:%[0-9]+]]:_(s16) = G_TRUNC [[COPY17]](s32)
+  ; GFX908:   [[TRUNC19:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC18]](s16)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr10
+  ; GFX908:   [[TRUNC20:%[0-9]+]]:_(s16) = G_TRUNC [[COPY18]](s32)
+  ; GFX908:   [[TRUNC21:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC20]](s16)
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr11
+  ; GFX908:   [[TRUNC22:%[0-9]+]]:_(s16) = G_TRUNC [[COPY19]](s32)
+  ; GFX908:   [[TRUNC23:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC22]](s16)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(s32) = COPY $vgpr12
+  ; GFX908:   [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[COPY20]](s32)
+  ; GFX908:   [[TRUNC25:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC24]](s16)
+  ; GFX908:   [[COPY21:%[0-9]+]]:_(s32) = COPY $vgpr13
+  ; GFX908:   [[TRUNC26:%[0-9]+]]:_(s16) = G_TRUNC [[COPY21]](s32)
+  ; GFX908:   [[TRUNC27:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC26]](s16)
+  ; GFX908:   [[COPY22:%[0-9]+]]:_(s32) = COPY $vgpr14
+  ; GFX908:   [[TRUNC28:%[0-9]+]]:_(s16) = G_TRUNC [[COPY22]](s32)
+  ; GFX908:   [[TRUNC29:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC28]](s16)
+  ; GFX908:   [[COPY23:%[0-9]+]]:_(s32) = COPY $vgpr15
+  ; GFX908:   [[TRUNC30:%[0-9]+]]:_(s16) = G_TRUNC [[COPY23]](s32)
+  ; GFX908:   [[TRUNC31:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC30]](s16)
+  ; GFX908:   [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr16
+  ; GFX908:   [[TRUNC32:%[0-9]+]]:_(s16) = G_TRUNC [[COPY24]](s32)
+  ; GFX908:   [[TRUNC33:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC32]](s16)
+  ; GFX908:   [[COPY25:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX908:   [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+  ; GFX908:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<32 x s32>)
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v32i32
+  ; GFX908:   [[COPY26:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY27:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX908:   [[COPY28:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX908:   [[COPY29:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX908:   [[COPY30:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY31:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX908:   [[COPY32:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX908:   [[COPY33:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX908:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX908:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX908:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX908:   $vgpr4 = COPY [[UV4]](s32)
+  ; GFX908:   $vgpr5 = COPY [[UV5]](s32)
+  ; GFX908:   $vgpr6 = COPY [[UV6]](s32)
+  ; GFX908:   $vgpr7 = COPY [[UV7]](s32)
+  ; GFX908:   $vgpr8 = COPY [[UV8]](s32)
+  ; GFX908:   $vgpr9 = COPY [[UV9]](s32)
+  ; GFX908:   $vgpr10 = COPY [[UV10]](s32)
+  ; GFX908:   $vgpr11 = COPY [[UV11]](s32)
+  ; GFX908:   $vgpr12 = COPY [[UV12]](s32)
+  ; GFX908:   $vgpr13 = COPY [[UV13]](s32)
+  ; GFX908:   $vgpr14 = COPY [[UV14]](s32)
+  ; GFX908:   $vgpr15 = COPY [[UV15]](s32)
+  ; GFX908:   $vgpr16 = COPY [[UV16]](s32)
+  ; GFX908:   $vgpr17 = COPY [[UV17]](s32)
+  ; GFX908:   $vgpr18 = COPY [[UV18]](s32)
+  ; GFX908:   $vgpr19 = COPY [[UV19]](s32)
+  ; GFX908:   $vgpr20 = COPY [[UV20]](s32)
+  ; GFX908:   $vgpr21 = COPY [[UV21]](s32)
+  ; GFX908:   $vgpr22 = COPY [[UV22]](s32)
+  ; GFX908:   $vgpr23 = COPY [[UV23]](s32)
+  ; GFX908:   $vgpr24 = COPY [[UV24]](s32)
+  ; GFX908:   $vgpr25 = COPY [[UV25]](s32)
+  ; GFX908:   $vgpr26 = COPY [[UV26]](s32)
+  ; GFX908:   $vgpr27 = COPY [[UV27]](s32)
+  ; GFX908:   $vgpr28 = COPY [[UV28]](s32)
+  ; GFX908:   $vgpr29 = COPY [[UV29]](s32)
+  ; GFX908:   $vgpr30 = COPY [[UV30]](s32)
+  ; GFX908:   [[COPY34:%[0-9]+]]:_(p5) = COPY $sgpr32
+  ; GFX908:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY34]], [[C1]](s32)
+  ; GFX908:   G_STORE [[UV31]](s32), [[PTR_ADD]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; GFX908:   [[COPY35:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY35]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY26]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY27]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[COPY28]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY29]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY30]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY31]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY32]](s32)
+  ; GFX908:   $vgpr31 = COPY [[COPY33]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
+  ; GFX908:   [[COPY36:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY25]]
+  ; GFX908:   S_SETPC_B64_return [[COPY36]]
+  call void @external_void_func_v32i32(<32 x i32> zeroinitializer)
+  ret void
+}
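+; Note: only the first 31 vector elements fit in $vgpr0-$vgpr30, since
+; $vgpr31 is reserved for the packed workitem IDs; the last element is
+; stored to the stack instead, which is why the call site above ends with
+; ADJCALLSTACKDOWN 0, 4.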
+
+; FIXME: Should fold out the parts of the packed workitem ID that are known to be 0.
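+; The packed ID in $vgpr31 is built as x | (y << 10) | (z << 20), as the
+; G_SHL/G_OR sequences below show. With !reqd_work_group_size, any dimension
+; whose required size is 1 has a workitem id that is provably 0, so its
+; shift/or sequence could be dropped.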
+
+define amdgpu_kernel void @test_only_workitem_id_x() #0 !reqd_work_group_size !0 {
+  ; GFX900-LABEL: name: test_only_workitem_id_x
+  ; GFX900: bb.1 (%ir-block.0):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_only_workitem_id_x
+  ; GFX908: bb.1 (%ir-block.0):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
+define amdgpu_kernel void @test_only_workitem_id_y() #0 !reqd_work_group_size !1 {
+  ; GFX900-LABEL: name: test_only_workitem_id_y
+  ; GFX900: bb.1 (%ir-block.0):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_only_workitem_id_y
+  ; GFX908: bb.1 (%ir-block.0):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
+define amdgpu_kernel void @test_only_workitem_id_z() #0 !reqd_work_group_size !2 {
+  ; GFX900-LABEL: name: test_only_workitem_id_z
+  ; GFX900: bb.1 (%ir-block.0):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_only_workitem_id_z
+  ; GFX908: bb.1 (%ir-block.0):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
+define amdgpu_kernel void @test_only_workitem_id_xy() #0 !reqd_work_group_size !3 {
+  ; GFX900-LABEL: name: test_only_workitem_id_xy
+  ; GFX900: bb.1 (%ir-block.0):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_only_workitem_id_xy
+  ; GFX908: bb.1 (%ir-block.0):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
+define amdgpu_kernel void @test_only_workitem_id_yz() #0 !reqd_work_group_size !4 {
+  ; GFX900-LABEL: name: test_only_workitem_id_yz
+  ; GFX900: bb.1 (%ir-block.0):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_only_workitem_id_yz
+  ; GFX908: bb.1 (%ir-block.0):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
+define amdgpu_kernel void @test_only_workitem_id_xz() #0 !reqd_work_group_size !5 {
+  ; GFX900-LABEL: name: test_only_workitem_id_xz
+  ; GFX900: bb.1 (%ir-block.0):
+  ; GFX900:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX900:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX900:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX900:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX900:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX900:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX900:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX900:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX900:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX900:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX900:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX900:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX900:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX900:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX900:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX900:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX900:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX900:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX900:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX900:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX900:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX900:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX900:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX900:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX900:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX900:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX900:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX900:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX900:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX900:   $vgpr0 = COPY [[C]](s32)
+  ; GFX900:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX900:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX900:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX900:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX900:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX900:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX900:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX900:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX900:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX900:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX900:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX900:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX900:   S_ENDPGM 0
+  ; GFX908-LABEL: name: test_only_workitem_id_xz
+  ; GFX908: bb.1 (%ir-block.0):
+  ; GFX908:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; GFX908:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; GFX908:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; GFX908:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; GFX908:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; GFX908:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; GFX908:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; GFX908:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; GFX908:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; GFX908:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; GFX908:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; GFX908:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; GFX908:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; GFX908:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; GFX908:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; GFX908:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; GFX908:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; GFX908:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; GFX908:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; GFX908:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; GFX908:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; GFX908:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; GFX908:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; GFX908:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; GFX908:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; GFX908:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; GFX908:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; GFX908:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; GFX908:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; GFX908:   $vgpr0 = COPY [[C]](s32)
+  ; GFX908:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; GFX908:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GFX908:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GFX908:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GFX908:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; GFX908:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GFX908:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GFX908:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GFX908:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GFX908:   $vgpr31 = COPY [[OR1]](s32)
+  ; GFX908:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; GFX908:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; GFX908:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare i32 @llvm.amdgcn.workitem.id.y() #1
+declare i32 @llvm.amdgcn.workitem.id.z() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable willreturn }
+
+!0 = !{i32 64, i32 1, i32 1}
+!1 = !{i32 1, i32 64, i32 1}
+!2 = !{i32 1, i32 1, i32 64}
+!3 = !{i32 32, i32 2, i32 1}
+!4 = !{i32 1, i32 32, i32 2}
+!5 = !{i32 32, i32 1, i32 2}
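+
+; The !reqd_work_group_size tuples above give the required (x, y, z) work
+; group size for the corresponding test; a dimension of size 1 implies its
+; workitem id is always 0.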

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
new file mode 100644
index 000000000000..a6b8e4d61ed0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
@@ -0,0 +1,4355 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -amdgpu-fixed-function-abi -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s
+
+declare hidden void @external_void_func_void() #0
+
+declare hidden void @external_void_func_empty_struct({}, i32) #0
+declare hidden void @external_void_func_empty_array([0 x i8], i32) #0
+
+declare hidden void @external_void_func_i1(i1) #0
+declare hidden void @external_void_func_i1_signext(i1 signext) #0
+declare hidden void @external_void_func_i1_zeroext(i1 zeroext) #0
+
+declare hidden void @external_void_func_i8(i8) #0
+declare hidden void @external_void_func_i8_signext(i8 signext) #0
+declare hidden void @external_void_func_i8_zeroext(i8 zeroext) #0
+
+declare hidden void @external_void_func_i16(i16) #0
+declare hidden void @external_void_func_i16_signext(i16 signext) #0
+declare hidden void @external_void_func_i16_zeroext(i16 zeroext) #0
+
+declare hidden void @external_void_func_i32(i32) #0
+declare hidden void @external_void_func_i64(i64) #0
+declare hidden void @external_void_func_v2i64(<2 x i64>) #0
+declare hidden void @external_void_func_v3i64(<3 x i64>) #0
+declare hidden void @external_void_func_v4i64(<4 x i64>) #0
+
+
+declare hidden void @external_void_func_i48(i48) #0
+declare hidden void @external_void_func_i48_signext(i48 signext) #0
+declare hidden void @external_void_func_i48_zeroext(i48 zeroext) #0
+
+declare hidden void @external_void_func_p0(i8*) #0
+declare hidden void @external_void_func_v2p0(<2 x i8*>) #0
+
+declare hidden void @external_void_func_f16(half) #0
+declare hidden void @external_void_func_f32(float) #0
+declare hidden void @external_void_func_f64(double) #0
+declare hidden void @external_void_func_v2f32(<2 x float>) #0
+declare hidden void @external_void_func_v2f64(<2 x double>) #0
+declare hidden void @external_void_func_v3f32(<3 x float>) #0
+declare hidden void @external_void_func_v3f64(<3 x double>) #0
+declare hidden void @external_void_func_v5f32(<5 x float>) #0
+
+declare hidden void @external_void_func_v2i16(<2 x i16>) #0
+declare hidden void @external_void_func_v2f16(<2 x half>) #0
+declare hidden void @external_void_func_v3i16(<3 x i16>) #0
+declare hidden void @external_void_func_v3f16(<3 x half>) #0
+declare hidden void @external_void_func_v4i16(<4 x i16>) #0
+declare hidden void @external_void_func_v4f16(<4 x half>) #0
+
+declare hidden void @external_void_func_v2i32(<2 x i32>) #0
+declare hidden void @external_void_func_v3i32(<3 x i32>) #0
+declare hidden void @external_void_func_v3i32_i32(<3 x i32>, i32) #0
+declare hidden void @external_void_func_v4i32(<4 x i32>) #0
+declare hidden void @external_void_func_v5i32(<5 x i32>) #0
+declare hidden void @external_void_func_v8i32(<8 x i32>) #0
+declare hidden void @external_void_func_v16i32(<16 x i32>) #0
+declare hidden void @external_void_func_v32i32(<32 x i32>) #0
+declare hidden void @external_void_func_v32i32_i32(<32 x i32>, i32) #0
+declare hidden void @external_void_func_v32i32_i8_i8_i16(<32 x i32>, i8, i8, i16) #0
+
+; Structs
+declare hidden void @external_void_func_struct_i8_i32({ i8, i32 }) #0
+declare hidden void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval) #0
+declare hidden void @external_void_func_sret_struct_i8_i32_byval_struct_i8_i32({ i8, i32 } addrspace(5)* sret, { i8, i32 } addrspace(5)* byval) #0
+
+declare hidden void @external_void_func_v2i8(<2 x i8>) #0
+declare hidden void @external_void_func_v3i8(<3 x i8>) #0
+declare hidden void @external_void_func_v4i8(<4 x i8>) #0
+declare hidden void @external_void_func_v8i8(<8 x i8>) #0
+declare hidden void @external_void_func_v16i8(<16 x i8>) #0
+
+declare hidden void @byval_align16_f64_arg(<32 x i32>, double addrspace(5)* byval align 16) #0
+declare hidden void @stack_passed_f64_arg(<32 x i32>, double) #0
+declare hidden void @external_void_func_12xv3i32(<3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>,
+    <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>, <3 x i32>) #0
+declare hidden void @external_void_func_8xv5i32(<5 x i32>, <5 x i32>, <5 x i32>, <5 x i32>,
+    <5 x i32>, <5 x i32>, <5 x i32>, <5 x i32>) #0
+declare hidden void @external_void_func_12xv3f32(<3 x float>, <3 x float>, <3 x float>, <3 x float>,
+    <3 x float>, <3 x float>, <3 x float>, <3 x float>, <3 x float>, <3 x float>, <3 x float>, <3 x float>) #0
+declare hidden void @external_void_func_8xv5f32(<5 x float>, <5 x float>, <5 x float>, <5 x float>,
+    <5 x float>, <5 x float>, <5 x float>, <5 x float>) #0
+
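+; A call from a kernel must materialize the full implicit argument set for the
+; callee: the inherited SGPR inputs are forwarded, the kernarg segment pointer
+; is offset past the explicit kernel arguments with G_PTR_ADD, the workitem IDs
+; from $vgpr0-$vgpr2 are packed into $vgpr31 (Y shifted by 10, Z by 20), and
+; the private segment resource is copied into $sgpr0_sgpr1_sgpr2_sgpr3.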
+define amdgpu_kernel void @test_call_external_void_func_void() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_void
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_void
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_void, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_void()
+  ret void
+}
+
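+; In a callable function the implicit inputs are already in ABI registers, so
+; they are forwarded as-is (including the pre-packed workitem IDs in $vgpr31),
+; and the saved return address in $sgpr30_sgpr31 is restored for the
+; S_SETPC_B64_return.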
+define void @test_func_call_external_void_func_void() #0 {
+  ; CHECK-LABEL: name: test_func_call_external_void_func_void
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_void
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY13]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY15]](s32)
+  ; CHECK:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_void, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
+  ; CHECK:   S_SETPC_B64_return [[COPY18]]
+  call void @external_void_func_void()
+  ret void
+}
+
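+; Zero-sized aggregate arguments ({} here, [0 x i8] in the next test) are
+; dropped entirely; only the trailing i32 is assigned to $vgpr0.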
+define amdgpu_kernel void @test_call_external_void_func_empty_struct() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_empty_struct
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_empty_struct
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[C]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_empty_struct, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_empty_struct({} zeroinitializer, i32 23)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_empty_array() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_empty_array
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_empty_array
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[C]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_empty_array, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_empty_array([0 x i8] zeroinitializer, i32 23)
+  ret void
+}
+
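+; The i1/i8/i16 tests below check that sub-32-bit integer arguments are
+; widened to a full 32-bit VGPR: plain values with G_ANYEXT, and
+; signext/zeroext parameters with G_SEXT/G_ZEXT.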
+define amdgpu_kernel void @test_call_external_void_func_i1_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i1_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s1)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i1
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i1, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_i1(i1 true)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i1_signext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (volatile load 1 from `i1 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i1_signext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i1_signext, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i1, i1 addrspace(1)* undef
+  call void @external_void_func_i1_signext(i1 signext %var)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i1_zeroext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i1_zeroext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (volatile load 1 from `i1 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i1_zeroext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i1_zeroext, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i1, i1 addrspace(1)* undef
+  call void @external_void_func_i1_zeroext(i1 zeroext %var)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i8_imm(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i8_imm
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 123
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i8
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i8, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_i8(i8 123)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i8_signext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load 1 from `i8 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i8_signext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i8_signext, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i8, i8 addrspace(1)* undef
+  call void @external_void_func_i8_signext(i8 signext %var)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i8_zeroext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i8_zeroext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load 1 from `i8 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i8_zeroext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i8_zeroext, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i8, i8 addrspace(1)* undef
+  call void @external_void_func_i8_zeroext(i8 zeroext %var)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i16_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 123
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i16, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_i16(i16 123)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i16_signext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (volatile load 2 from `i16 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i16_signext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[SEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i16_signext, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i16, i16 addrspace(1)* undef
+  call void @external_void_func_i16_signext(i16 signext %var)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_i16_zeroext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i16_zeroext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (volatile load 2 from `i16 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i16_zeroext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[ZEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i16_zeroext, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i16, i16 addrspace(1)* undef
+  call void @external_void_func_i16_zeroext(i16 zeroext %var)
+  ret void
+}
+
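+; A 32-bit argument needs no extension and is passed directly in $vgpr0.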
+define amdgpu_kernel void @test_call_external_void_func_i32_imm(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i32_imm
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[C]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_i32(i32 42)
+  ret void
+}
+
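+; A 64-bit argument is split with G_UNMERGE_VALUES into two 32-bit pieces in
+; $vgpr0/$vgpr1.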
+define amdgpu_kernel void @test_call_external_void_func_i64_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i64_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 123
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_i64(i64 123)
+  ret void
+}
+
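+; A <2 x i64> argument is unmerged into four 32-bit pieces in $vgpr0-$vgpr3.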
+define amdgpu_kernel void @test_call_external_void_func_v2i64() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2i64
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[C]](p1) :: (load 16 from `<2 x i64> addrspace(1)* null`, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2i64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2i64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <2 x i64>, <2 x i64> addrspace(1)* null
+  call void @external_void_func_v2i64(<2 x i64> %val)
+  ret void
+}
+
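+; A constant <2 x i64> is first materialized with G_BUILD_VECTOR and then
+; unmerged the same way.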
+define amdgpu_kernel void @test_call_external_void_func_v2i64_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2i64_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934593
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 17179869187
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C1]](s64)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s64>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2i64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C3]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C4]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2i64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v2i64(<2 x i64> <i64 8589934593, i64 17179869187>)
+  ret void
+}
+
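+; An i48 value is any-extended to i64, then split into two 32-bit pieces
+; for $vgpr0/$vgpr1.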
+define amdgpu_kernel void @test_call_external_void_func_i48(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i48
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (volatile load 6 from `i48 addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s48)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i48
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i48, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i48, i48 addrspace(1)* undef
+  call void @external_void_func_i48(i48 %var)
+  ret void
+}
+
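+; With the signext attribute, the i48 load is widened with G_SEXT rather
+; than G_ANYEXT before the split.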
+define amdgpu_kernel void @test_call_external_void_func_i48_signext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i48_signext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (volatile load 6 from `i48 addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s48)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i48_signext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i48_signext, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i48, i48 addrspace(1)* undef
+  call void @external_void_func_i48_signext(i48 signext %var)
+  ret void
+}
+
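+; The zeroext attribute selects G_ZEXT for the widening.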
+define amdgpu_kernel void @test_call_external_void_func_i48_zeroext(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_i48_zeroext
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (volatile load 6 from `i48 addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s48)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_i48_zeroext
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_i48_zeroext, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %var = load volatile i48, i48 addrspace(1)* undef
+  call void @external_void_func_i48_zeroext(i48 zeroext %var)
+  ret void
+}
+
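+; A flat pointer (p0) argument is loaded from the kernarg segment and
+; unmerged into two 32-bit pieces.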
+define amdgpu_kernel void @test_call_external_void_func_p0_imm(i8* %arg) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_p0_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load 8 from %ir.arg.kernarg.offset.cast, align 16, addrspace 4)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](p0)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_p0
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_p0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_p0(i8* %arg)
+  ret void
+}
+
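+; A <2 x i8*> argument lowers like <2 x i64>: four 32-bit pieces in
+; $vgpr0-$vgpr3.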
+define amdgpu_kernel void @test_call_external_void_func_v2p0() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2p0
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x p0>) = G_LOAD [[C]](p1) :: (load 16 from `<2 x i8*> addrspace(1)* null`, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x p0>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2p0
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2p0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <2 x i8*>, <2 x i8*> addrspace(1)* null
+  call void @external_void_func_v2p0(<2 x i8*> %val)
+  ret void
+}
+
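+; A <3 x i64> built by widening a <2 x i64> load with a shufflevector
+; takes six VGPRs.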
+define amdgpu_kernel void @test_call_external_void_func_v3i64() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3i64
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934593
+  ; CHECK:   [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[DEF]](s64)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[C]](p1) :: (load 16 from `<2 x i64> addrspace(1)* null`, addrspace 1)
+  ; CHECK:   [[SHUF:%[0-9]+]]:_(<3 x s64>) = G_SHUFFLE_VECTOR [[LOAD]](<2 x s64>), [[BUILD_VECTOR]], shufflemask(0, 1, 2)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHUF]](<3 x s64>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3i64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C3]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C4]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3i64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %load = load <2 x i64>, <2 x i64> addrspace(1)* null
+  %val = shufflevector <2 x i64> %load, <2 x i64> <i64 8589934593, i64 undef>, <3 x i32> <i32 0, i32 1, i32 2>
+
+  call void @external_void_func_v3i64(<3 x i64> %val)
+  ret void
+}
+
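+; <4 x i64> takes eight VGPRs.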
+define amdgpu_kernel void @test_call_external_void_func_v4i64() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v4i64
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934593
+  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 17179869187
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C2]](s64)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[C]](p1) :: (load 16 from `<2 x i64> addrspace(1)* null`, addrspace 1)
+  ; CHECK:   [[SHUF:%[0-9]+]]:_(<4 x s64>) = G_SHUFFLE_VECTOR [[LOAD]](<2 x s64>), [[BUILD_VECTOR]], shufflemask(0, 1, 2, 3)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHUF]](<4 x s64>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v4i64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C4]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C5]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v4i64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %load = load <2 x i64>, <2 x i64> addrspace(1)* null
+  %val = shufflevector <2 x i64> %load, <2 x i64> <i64 8589934593, i64 17179869187>, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  call void @external_void_func_v4i64(<4 x i64> %val)
+  ret void
+}
+
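+; A half immediate (0xH4400 is 4.0) is any-extended to 32 bits before the
+; copy into $vgpr0.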
+define amdgpu_kernel void @test_call_external_void_func_f16_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_f16_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4400
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_f16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_f16, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_f16(half 4.0)
+  ret void
+}
+
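+; A float immediate is copied directly into $vgpr0 with no extension.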
+define amdgpu_kernel void @test_call_external_void_func_f32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_f32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 4.000000e+00
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_f32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[C]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_f32(float 4.0)
+  ret void
+}
+
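+; <2 x float> is built from two immediates and unmerged into $vgpr0/$vgpr1.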
+define amdgpu_kernel void @test_call_external_void_func_v2f32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2f32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2f32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C3]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C4]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v2f32(<2 x float> <float 1.0, float 2.0>)
+  ret void
+}
+
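+; <3 x float> splits into three VGPRs.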
+define amdgpu_kernel void @test_call_external_void_func_v3f32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3f32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 4.000000e+00
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3f32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C4]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C5]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v3f32(<3 x float> <float 1.0, float 2.0, float 4.0>)
+  ret void
+}
+
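+; A five-element float vector, including negative and fractional values,
+; splits into $vgpr0-$vgpr4.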
+define amdgpu_kernel void @test_call_external_void_func_v5f32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v5f32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 4.000000e+00
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_FCONSTANT float -1.000000e+00
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<5 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v5f32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C5]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C6]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C7]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v5f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v5f32(<5 x float> <float 1.0, float 2.0, float 4.0, float -1.0, float 0.5>)
+  ret void
+}
+
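+; A double immediate is unmerged into two 32-bit pieces for $vgpr0/$vgpr1.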
+define amdgpu_kernel void @test_call_external_void_func_f64_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_f64_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_f64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_f64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_f64(double 4.0)
+  ret void
+}
+
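+; <2 x double> takes four VGPRs.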
+define amdgpu_kernel void @test_call_external_void_func_v2f64_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2f64_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C1]](s64)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s64>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2f64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C3]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C4]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2f64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v2f64(<2 x double> <double 2.0, double 4.0>)
+  ret void
+}
+
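+; <3 x double> takes six VGPRs.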
+define amdgpu_kernel void @test_call_external_void_func_v3f64_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3f64_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
+  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 8.000000e+00
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C1]](s64), [[C2]](s64)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s64>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3f64
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C4]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C5]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3f64, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v3f64(<3 x double> <double 2.0, double 4.0, double 8.0>)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v2i16() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2i16
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load 4 from `<2 x i16> addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2i16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2i16, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <2 x i16>, <2 x i16> addrspace(1)* undef
+  call void @external_void_func_v2i16(<2 x i16> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v3i16() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3i16
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load 6 from `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK:   [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+  ; CHECK:   [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[LOAD]](<3 x s16>), 0
+  ; CHECK:   [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
+  ; CHECK:   [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3i16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[EXTRACT]](<2 x s16>)
+  ; CHECK:   $vgpr1 = COPY [[EXTRACT1]](<2 x s16>)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3i16, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <3 x i16>, <3 x i16> addrspace(1)* undef
+  call void @external_void_func_v3i16(<3 x i16> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v3f16() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3f16
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load 6 from `<3 x half> addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK:   [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+  ; CHECK:   [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[LOAD]](<3 x s16>), 0
+  ; CHECK:   [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
+  ; CHECK:   [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3f16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[EXTRACT]](<2 x s16>)
+  ; CHECK:   $vgpr1 = COPY [[EXTRACT1]](<2 x s16>)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3f16, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <3 x half>, <3 x half> addrspace(1)* undef
+  call void @external_void_func_v3f16(<3 x half> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v4i16() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v4i16
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load 8 from `<4 x i16> addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v4i16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v4i16, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <4 x i16>, <4 x i16> addrspace(1)* undef
+  call void @external_void_func_v4i16(<4 x i16> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v4i16_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v4i16_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; CHECK:   [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+  ; CHECK:   [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
+  ; CHECK:   [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16)
+  ; CHECK:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s16>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v4i16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C4]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C5]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C6]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](<2 x s16>)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](<2 x s16>)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v4i16, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v4i16(<4 x i16> <i16 1, i16 2, i16 3, i16 4>)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v2f16() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2f16
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load 4 from `<2 x half> addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2f16
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2f16, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <2 x half>, <2 x half> addrspace(1)* undef
+  call void @external_void_func_v2f16(<2 x half> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v2i32() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[DEF]](p1) :: (load 8 from `<2 x i32> addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <2 x i32>, <2 x i32> addrspace(1)* undef
+  call void @external_void_func_v2i32(<2 x i32> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v2i32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2i32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C2]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C3]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C4]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v2i32(<2 x i32> <i32 1, i32 2>)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v3i32_imm(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3i32_imm
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32)
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C4]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C5]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v3i32(<3 x i32> <i32 3, i32 4, i32 5>)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v3i32_i32(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3i32_i32
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3i32_i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C4]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C5]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C6]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[C3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3i32_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v3i32_i32(<3 x i32> <i32 3, i32 4, i32 5>, i32 6)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v4i32() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v4i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[DEF]](p1) :: (load 16 from `<4 x i32> addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v4i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v4i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = load <4 x i32>, <4 x i32> addrspace(1)* undef
+  call void @external_void_func_v4i32(<4 x i32> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v4i32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v4i32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v4i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C4]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C5]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C6]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v4i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v4i32(<4 x i32> <i32 1, i32 2, i32 3, i32 4>)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v5i32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v5i32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<5 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v5i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C5]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C6]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C7]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v5i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v5i32(<5 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5>)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v8i32() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v8i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<8 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[LOAD]](p1) :: (load 32 from %ir.ptr, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v8i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v8i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(4)* undef
+  %val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
+  call void @external_void_func_v8i32(<8 x i32> %val)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v8i32_imm() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v8i32_imm
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+  ; CHECK:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32), [[C5]](s32), [[C6]](s32), [[C7]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<8 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v8i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C8]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C9]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C10]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v8i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void @external_void_func_v8i32(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>)
+  ret void
+}
+
+define amdgpu_kernel void @test_call_external_void_func_v16i32() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v16i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<16 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[LOAD]](p1) :: (load 64 from %ir.ptr, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v16i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v16i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(4)* undef
+  %val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr
+  call void @external_void_func_v16i32(<16 x i32> %val)
+  ret void
+}
+
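+; With <32 x i32>, only 31 elements fit in $vgpr0-$vgpr30; the last element
+; spills to the first outgoing stack slot, so 4 bytes are deallocated after
+; the call.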
+define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v32i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v32i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(p5) = COPY $sp_reg
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C3]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[COPY21:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY21]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
+  %val = load <32 x i32>, <32 x i32> addrspace(1)* %ptr
+  call void @external_void_func_v32i32(<32 x i32> %val)
+  ret void
+}
+
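+; The trailing i32 follows the spilled vector element on the stack, at
+; offset 4, for 8 bytes of outgoing argument space.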
+define amdgpu_kernel void @test_call_external_void_func_v32i32_i32(i32) #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v32i32_i32
+  ; CHECK: bb.1 (%ir-block.1):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr0, addrspace 1)
+  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p1) :: (load 4 from `i32 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v32i32_i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(p5) = COPY $sp_reg
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C3]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C4]](s32)
+  ; CHECK:   G_STORE [[LOAD2]](s32), [[PTR_ADD2]](p5) :: (store 4 into stack + 4, addrspace 5)
+  ; CHECK:   [[COPY21:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY21]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v32i32_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 8, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr0 = load <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
+  %val0 = load <32 x i32>, <32 x i32> addrspace(1)* %ptr0
+  %val1 = load i32, i32 addrspace(1)* undef
+  call void @external_void_func_v32i32_i32(<32 x i32> %val0, i32 %val1)
+  ret void
+}
+
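+; Sub-dword stack arguments are anyext'd to s32 and each occupies a full
+; 4-byte stack slot, giving 16 bytes of outgoing argument space.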
+define amdgpu_kernel void @test_call_external_void_func_v32i32_i8_i8_i16() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v32i32_i8_i8_i16
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p1) = COPY [[DEF1]](p1)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr0, addrspace 1)
+  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[DEF1]](p1) :: (load 1 from `i8 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[LOAD3:%[0-9]+]]:_(s16) = G_LOAD [[COPY10]](p1) :: (load 2 from `i16 addrspace(1)* undef`, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD2]](s8)
+  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD3]](s16)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v32i32_i8_i8_i16
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY13]], [[C]](s64)
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY18]], [[SHL]]
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY20]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY21:%[0-9]+]]:_(p5) = COPY $sp_reg
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY21]], [[C3]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY21]], [[C4]](s32)
+  ; CHECK:   G_STORE [[ANYEXT]](s32), [[PTR_ADD2]](p5) :: (store 4 into stack + 4, addrspace 5)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY21]], [[C5]](s32)
+  ; CHECK:   G_STORE [[ANYEXT]](s32), [[PTR_ADD3]](p5) :: (store 4 into stack + 8, align 8, addrspace 5)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; CHECK:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY21]], [[C6]](s32)
+  ; CHECK:   G_STORE [[ANYEXT1]](s32), [[PTR_ADD4]](p5) :: (store 4 into stack + 12, addrspace 5)
+  ; CHECK:   [[COPY22:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY22]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY12]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY14]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY16]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY17]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v32i32_i8_i8_i16, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 16, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr0 = load <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
+  %val0 = load <32 x i32>, <32 x i32> addrspace(1)* %ptr0
+  %val1 = load i8, i8 addrspace(1)* undef
+  %val2 = load i8, i8 addrspace(1)* undef
+  %val3 = load i16, i16 addrspace(1)* undef
+  call void @external_void_func_v32i32_i8_i8_i16(<32 x i32> %val0, i8 %val1, i8 %val2, i16 %val3)
+  ret void
+}
+
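+; An aggregate argument is split per field: the i8 is anyext'd into $vgpr0
+; and the i32 is passed in $vgpr1.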
+define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_struct_i8_i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `{ i8, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load 1 from %ir.ptr0, align 4, addrspace 1)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
+  ; CHECK:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from %ir.ptr0 + 4, addrspace 1)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_struct_i8_i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT]](s32)
+  ; CHECK:   $vgpr1 = COPY [[LOAD2]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD1]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_struct_i8_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr0 = load { i8, i32 } addrspace(1)*, { i8, i32 } addrspace(1)* addrspace(4)* undef
+  %val = load { i8, i32 }, { i8, i32 } addrspace(1)* %ptr0
+  call void @external_void_func_struct_i8_i32({ i8, i32 } %val)
+  ret void
+}
+
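+; For the byval argument, the caller stores the p5 frame-index pointer to the
+; stack object into the first outgoing stack slot.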
+define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_byval_struct_i8_i32
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 3
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.val
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C2]](s32)
+  ; CHECK:   G_STORE [[C]](s8), [[FRAME_INDEX]](p5) :: (store 1 into %ir.gep01, addrspace 5)
+  ; CHECK:   G_STORE [[C1]](s32), [[PTR_ADD]](p5) :: (store 4 into %ir.gep1, addrspace 5)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_byval_struct_i8_i32
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C3]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C4]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C5]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(p5) = COPY $sp_reg
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C6]](s32)
+  ; CHECK:   G_STORE [[FRAME_INDEX]](p5), [[PTR_ADD2]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[COPY21:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY21]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD1]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_byval_struct_i8_i32, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 8, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %val = alloca { i8, i32 }, align 4, addrspace(5)
+  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %val, i32 0, i32 0
+  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %val, i32 0, i32 1
+  store i8 3, i8 addrspace(5)* %gep0
+  store i32 8, i32 addrspace(5)* %gep1
+  call void @external_void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %val)
+  ret void
+}
+
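+; <2 x i8> is unmerged into s8 elements, each widened s8 -> s16 -> s32 and
+; passed in a VGPR.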
+define amdgpu_kernel void @test_call_external_void_func_v2i8() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v2i8
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<2 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[LOAD]](p1) :: (load 2 from %ir.ptr, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<2 x s8>)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
+  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v2i8
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT2]](s32)
+  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
+  ; CHECK:   $vgpr1 = COPY [[ANYEXT3]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v2i8, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <2 x i8> addrspace(1)*, <2 x i8> addrspace(1)* addrspace(4)* undef
+  %val = load <2 x i8>, <2 x i8> addrspace(1)* %ptr
+  call void @external_void_func_v2i8(<2 x i8> %val)
+  ret void
+}
+
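+; Same s8 -> s16 -> s32 widening for each element of <3 x i8>.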
+define amdgpu_kernel void @test_call_external_void_func_v3i8() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v3i8
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<3 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[LOAD]](p1) :: (load 3 from %ir.ptr, align 4, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<3 x s8>)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
+  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
+  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v3i8
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT3]](s32)
+  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
+  ; CHECK:   $vgpr1 = COPY [[ANYEXT4]](s32)
+  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
+  ; CHECK:   $vgpr2 = COPY [[ANYEXT5]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v3i8, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <3 x i8> addrspace(1)*, <3 x i8> addrspace(1)* addrspace(4)* undef
+  %val = load <3 x i8>, <3 x i8> addrspace(1)* %ptr
+  call void @external_void_func_v3i8(<3 x i8> %val)
+  ret void
+}
+
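+; Same widening pattern for <4 x i8>.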
+define amdgpu_kernel void @test_call_external_void_func_v4i8() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v4i8
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<4 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[LOAD]](p1) :: (load 4 from %ir.ptr, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<4 x s8>)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
+  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
+  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
+  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v4i8
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT4]](s32)
+  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
+  ; CHECK:   $vgpr1 = COPY [[ANYEXT5]](s32)
+  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
+  ; CHECK:   $vgpr2 = COPY [[ANYEXT6]](s32)
+  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
+  ; CHECK:   $vgpr3 = COPY [[ANYEXT7]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v4i8, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <4 x i8> addrspace(1)*, <4 x i8> addrspace(1)* addrspace(4)* undef
+  %val = load <4 x i8>, <4 x i8> addrspace(1)* %ptr
+  call void @external_void_func_v4i8(<4 x i8> %val)
+  ret void
+}
+
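+; Same widening pattern for <8 x i8>.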
+define amdgpu_kernel void @test_call_external_void_func_v8i8() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v8i8
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<8 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[LOAD]](p1) :: (load 8 from %ir.ptr, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<8 x s8>)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
+  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
+  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
+  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
+  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s16) = G_ANYEXT [[UV4]](s8)
+  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s16) = G_ANYEXT [[UV5]](s8)
+  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s16) = G_ANYEXT [[UV6]](s8)
+  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s16) = G_ANYEXT [[UV7]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v8i8
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT8]](s32)
+  ; CHECK:   [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
+  ; CHECK:   $vgpr1 = COPY [[ANYEXT9]](s32)
+  ; CHECK:   [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
+  ; CHECK:   $vgpr2 = COPY [[ANYEXT10]](s32)
+  ; CHECK:   [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
+  ; CHECK:   $vgpr3 = COPY [[ANYEXT11]](s32)
+  ; CHECK:   [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT4]](s16)
+  ; CHECK:   $vgpr4 = COPY [[ANYEXT12]](s32)
+  ; CHECK:   [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT5]](s16)
+  ; CHECK:   $vgpr5 = COPY [[ANYEXT13]](s32)
+  ; CHECK:   [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT6]](s16)
+  ; CHECK:   $vgpr6 = COPY [[ANYEXT14]](s32)
+  ; CHECK:   [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT7]](s16)
+  ; CHECK:   $vgpr7 = COPY [[ANYEXT15]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v8i8, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <8 x i8> addrspace(1)*, <8 x i8> addrspace(1)* addrspace(4)* undef
+  %val = load <8 x i8>, <8 x i8> addrspace(1)* %ptr
+  call void @external_void_func_v8i8(<8 x i8> %val)
+  ret void
+}
+
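+; Same widening pattern for <16 x i8>, filling $vgpr0-$vgpr15.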
+define amdgpu_kernel void @test_call_external_void_func_v16i8() #0 {
+  ; CHECK-LABEL: name: test_call_external_void_func_v16i8
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load 8 from `<16 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[LOAD]](p1) :: (load 16 from %ir.ptr, addrspace 1)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<16 x s8>)
+  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
+  ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
+  ; CHECK:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
+  ; CHECK:   [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
+  ; CHECK:   [[ANYEXT4:%[0-9]+]]:_(s16) = G_ANYEXT [[UV4]](s8)
+  ; CHECK:   [[ANYEXT5:%[0-9]+]]:_(s16) = G_ANYEXT [[UV5]](s8)
+  ; CHECK:   [[ANYEXT6:%[0-9]+]]:_(s16) = G_ANYEXT [[UV6]](s8)
+  ; CHECK:   [[ANYEXT7:%[0-9]+]]:_(s16) = G_ANYEXT [[UV7]](s8)
+  ; CHECK:   [[ANYEXT8:%[0-9]+]]:_(s16) = G_ANYEXT [[UV8]](s8)
+  ; CHECK:   [[ANYEXT9:%[0-9]+]]:_(s16) = G_ANYEXT [[UV9]](s8)
+  ; CHECK:   [[ANYEXT10:%[0-9]+]]:_(s16) = G_ANYEXT [[UV10]](s8)
+  ; CHECK:   [[ANYEXT11:%[0-9]+]]:_(s16) = G_ANYEXT [[UV11]](s8)
+  ; CHECK:   [[ANYEXT12:%[0-9]+]]:_(s16) = G_ANYEXT [[UV12]](s8)
+  ; CHECK:   [[ANYEXT13:%[0-9]+]]:_(s16) = G_ANYEXT [[UV13]](s8)
+  ; CHECK:   [[ANYEXT14:%[0-9]+]]:_(s16) = G_ANYEXT [[UV14]](s8)
+  ; CHECK:   [[ANYEXT15:%[0-9]+]]:_(s16) = G_ANYEXT [[UV15]](s8)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_v16i8
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[ANYEXT16:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
+  ; CHECK:   $vgpr0 = COPY [[ANYEXT16]](s32)
+  ; CHECK:   [[ANYEXT17:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
+  ; CHECK:   $vgpr1 = COPY [[ANYEXT17]](s32)
+  ; CHECK:   [[ANYEXT18:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
+  ; CHECK:   $vgpr2 = COPY [[ANYEXT18]](s32)
+  ; CHECK:   [[ANYEXT19:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
+  ; CHECK:   $vgpr3 = COPY [[ANYEXT19]](s32)
+  ; CHECK:   [[ANYEXT20:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT4]](s16)
+  ; CHECK:   $vgpr4 = COPY [[ANYEXT20]](s32)
+  ; CHECK:   [[ANYEXT21:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT5]](s16)
+  ; CHECK:   $vgpr5 = COPY [[ANYEXT21]](s32)
+  ; CHECK:   [[ANYEXT22:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT6]](s16)
+  ; CHECK:   $vgpr6 = COPY [[ANYEXT22]](s32)
+  ; CHECK:   [[ANYEXT23:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT7]](s16)
+  ; CHECK:   $vgpr7 = COPY [[ANYEXT23]](s32)
+  ; CHECK:   [[ANYEXT24:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT8]](s16)
+  ; CHECK:   $vgpr8 = COPY [[ANYEXT24]](s32)
+  ; CHECK:   [[ANYEXT25:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT9]](s16)
+  ; CHECK:   $vgpr9 = COPY [[ANYEXT25]](s32)
+  ; CHECK:   [[ANYEXT26:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT10]](s16)
+  ; CHECK:   $vgpr10 = COPY [[ANYEXT26]](s32)
+  ; CHECK:   [[ANYEXT27:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT11]](s16)
+  ; CHECK:   $vgpr11 = COPY [[ANYEXT27]](s32)
+  ; CHECK:   [[ANYEXT28:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT12]](s16)
+  ; CHECK:   $vgpr12 = COPY [[ANYEXT28]](s32)
+  ; CHECK:   [[ANYEXT29:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT13]](s16)
+  ; CHECK:   $vgpr13 = COPY [[ANYEXT29]](s32)
+  ; CHECK:   [[ANYEXT30:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT14]](s16)
+  ; CHECK:   $vgpr14 = COPY [[ANYEXT30]](s32)
+  ; CHECK:   [[ANYEXT31:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT15]](s16)
+  ; CHECK:   $vgpr15 = COPY [[ANYEXT31]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_v16i8, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  %ptr = load <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(4)* undef
+  %val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr
+  call void @external_void_func_v16i8(<16 x i8> %val)
+  ret void
+}
+
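+; Kernel argument test: <32 x i32> plus a double. The first 31 dwords are
+; passed in $vgpr0-$vgpr30; the last vector element and both halves of the
+; double go on the stack (12 bytes, matching ADJCALLSTACKDOWN 0, 12).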
+define amdgpu_kernel void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val, double %tmp) #0 {
+  ; CHECK-LABEL: name: stack_passed_arg_alignment_v32i32_f64
+  ; CHECK: bb.1.entry:
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load 128 from %ir.val.kernarg.offset.cast, align 16, addrspace 4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[INT]], [[C]](s64)
+  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load 8 from %ir.tmp.kernarg.offset.cast, align 16, addrspace 4)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<32 x s32>)
+  ; CHECK:   [[UV32:%[0-9]+]]:_(s32), [[UV33:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @stack_passed_f64_arg
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 136
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C1]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C2]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C3]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(p5) = COPY $sp_reg
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C4]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD2]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C5]](s32)
+  ; CHECK:   G_STORE [[UV32]](s32), [[PTR_ADD3]](p5) :: (store 4 into stack + 4, addrspace 5)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY20]], [[C6]](s32)
+  ; CHECK:   G_STORE [[UV33]](s32), [[PTR_ADD4]](p5) :: (store 4 into stack + 8, align 8, addrspace 5)
+  ; CHECK:   [[COPY21:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY21]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD1]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @stack_passed_f64_arg, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 12, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+entry:
+  call void @stack_passed_f64_arg(<32 x i32> %val, double %tmp)
+  ret void
+}
+
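+; Twelve <3 x i32> arguments give 36 dwords: 31 in $vgpr0-$vgpr30, the
+; remaining 5 on the stack (20 bytes, matching ADJCALLSTACKDOWN 0, 20).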
+define void @stack_12xv3i32() #0 {
+  ; CHECK-LABEL: name: stack_12xv3i32
+  ; CHECK: bb.1.entry:
+  ; CHECK:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32), [[C2]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; CHECK:   [[BUILD_VECTOR3:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[BUILD_VECTOR4:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C4]](s32), [[C4]](s32), [[C4]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; CHECK:   [[BUILD_VECTOR5:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C5]](s32), [[C5]](s32), [[C5]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+  ; CHECK:   [[BUILD_VECTOR6:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C6]](s32), [[C6]](s32), [[C6]](s32)
+  ; CHECK:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+  ; CHECK:   [[BUILD_VECTOR7:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C7]](s32), [[C7]](s32), [[C7]](s32)
+  ; CHECK:   [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[BUILD_VECTOR8:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C8]](s32), [[C8]](s32), [[C8]](s32)
+  ; CHECK:   [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
+  ; CHECK:   [[BUILD_VECTOR9:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C9]](s32), [[C9]](s32), [[C9]](s32)
+  ; CHECK:   [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
+  ; CHECK:   [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; CHECK:   [[BUILD_VECTOR10:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C10]](s32), [[C11]](s32), [[C12]](s32)
+  ; CHECK:   [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
+  ; CHECK:   [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 14
+  ; CHECK:   [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+  ; CHECK:   [[BUILD_VECTOR11:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C13]](s32), [[C14]](s32), [[C15]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s32>)
+  ; CHECK:   [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<3 x s32>)
+  ; CHECK:   [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<3 x s32>)
+  ; CHECK:   [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR3]](<3 x s32>)
+  ; CHECK:   [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR4]](<3 x s32>)
+  ; CHECK:   [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR5]](<3 x s32>)
+  ; CHECK:   [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR6]](<3 x s32>)
+  ; CHECK:   [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR7]](<3 x s32>)
+  ; CHECK:   [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR8]](<3 x s32>)
+  ; CHECK:   [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR9]](<3 x s32>)
+  ; CHECK:   [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR10]](<3 x s32>)
+  ; CHECK:   [[UV33:%[0-9]+]]:_(s32), [[UV34:%[0-9]+]]:_(s32), [[UV35:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR11]](<3 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_12xv3i32
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(p5) = COPY $sgpr32
+  ; CHECK:   [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C16]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C17]](s32)
+  ; CHECK:   G_STORE [[UV32]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack + 4, addrspace 5)
+  ; CHECK:   [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C18]](s32)
+  ; CHECK:   G_STORE [[UV33]](s32), [[PTR_ADD2]](p5) :: (store 4 into stack + 8, align 8, addrspace 5)
+  ; CHECK:   [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C19]](s32)
+  ; CHECK:   G_STORE [[UV34]](s32), [[PTR_ADD3]](p5) :: (store 4 into stack + 12, addrspace 5)
+  ; CHECK:   [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; CHECK:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C20]](s32)
+  ; CHECK:   G_STORE [[UV35]](s32), [[PTR_ADD4]](p5) :: (store 4 into stack + 16, align 16, addrspace 5)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY13]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY15]](s32)
+  ; CHECK:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_12xv3i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 20, implicit-def $scc
+  ; CHECK:   [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
+  ; CHECK:   S_SETPC_B64_return [[COPY19]]
+entry:
+  call void @external_void_func_12xv3i32(
+      <3 x i32> <i32 0, i32 0, i32 0>,
+      <3 x i32> <i32 1, i32 1, i32 1>,
+      <3 x i32> <i32 2, i32 2, i32 2>,
+      <3 x i32> <i32 3, i32 3, i32 3>,
+      <3 x i32> <i32 4, i32 4, i32 4>,
+      <3 x i32> <i32 5, i32 5, i32 5>,
+      <3 x i32> <i32 6, i32 6, i32 6>,
+      <3 x i32> <i32 7, i32 7, i32 7>,
+      <3 x i32> <i32 8, i32 8, i32 8>,
+      <3 x i32> <i32 9, i32 9, i32 9>,
+      <3 x i32> <i32 10, i32 11, i32 12>,
+      <3 x i32> <i32 13, i32 14, i32 15>)
+  ret void
+}
+
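+; Same layout as stack_12xv3i32, but with <3 x float> vectors: 31 dwords in
+; VGPRs, 5 on the stack.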
+define void @stack_12xv3f32() #0 {
+  ; CHECK-LABEL: name: stack_12xv3f32
+  ; CHECK: bb.1.entry:
+  ; CHECK:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK:   [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32), [[C2]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_FCONSTANT float 3.000000e+00
+  ; CHECK:   [[BUILD_VECTOR3:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_FCONSTANT float 4.000000e+00
+  ; CHECK:   [[BUILD_VECTOR4:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C4]](s32), [[C4]](s32), [[C4]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e+00
+  ; CHECK:   [[BUILD_VECTOR5:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C5]](s32), [[C5]](s32), [[C5]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 6.000000e+00
+  ; CHECK:   [[BUILD_VECTOR6:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C6]](s32), [[C6]](s32), [[C6]](s32)
+  ; CHECK:   [[C7:%[0-9]+]]:_(s32) = G_FCONSTANT float 7.000000e+00
+  ; CHECK:   [[BUILD_VECTOR7:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C7]](s32), [[C7]](s32), [[C7]](s32)
+  ; CHECK:   [[C8:%[0-9]+]]:_(s32) = G_FCONSTANT float 8.000000e+00
+  ; CHECK:   [[BUILD_VECTOR8:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C8]](s32), [[C8]](s32), [[C8]](s32)
+  ; CHECK:   [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 9.000000e+00
+  ; CHECK:   [[BUILD_VECTOR9:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C9]](s32), [[C9]](s32), [[C9]](s32)
+  ; CHECK:   [[C10:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+01
+  ; CHECK:   [[C11:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.100000e+01
+  ; CHECK:   [[C12:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.200000e+01
+  ; CHECK:   [[BUILD_VECTOR10:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C10]](s32), [[C11]](s32), [[C12]](s32)
+  ; CHECK:   [[C13:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.300000e+01
+  ; CHECK:   [[C14:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.400000e+01
+  ; CHECK:   [[C15:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.500000e+01
+  ; CHECK:   [[BUILD_VECTOR11:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C13]](s32), [[C14]](s32), [[C15]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s32>)
+  ; CHECK:   [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<3 x s32>)
+  ; CHECK:   [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<3 x s32>)
+  ; CHECK:   [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR3]](<3 x s32>)
+  ; CHECK:   [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR4]](<3 x s32>)
+  ; CHECK:   [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR5]](<3 x s32>)
+  ; CHECK:   [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR6]](<3 x s32>)
+  ; CHECK:   [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR7]](<3 x s32>)
+  ; CHECK:   [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR8]](<3 x s32>)
+  ; CHECK:   [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR9]](<3 x s32>)
+  ; CHECK:   [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR10]](<3 x s32>)
+  ; CHECK:   [[UV33:%[0-9]+]]:_(s32), [[UV34:%[0-9]+]]:_(s32), [[UV35:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR11]](<3 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_12xv3f32
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(p5) = COPY $sgpr32
+  ; CHECK:   [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C16]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C17]](s32)
+  ; CHECK:   G_STORE [[UV32]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack + 4, addrspace 5)
+  ; CHECK:   [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C18]](s32)
+  ; CHECK:   G_STORE [[UV33]](s32), [[PTR_ADD2]](p5) :: (store 4 into stack + 8, align 8, addrspace 5)
+  ; CHECK:   [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C19]](s32)
+  ; CHECK:   G_STORE [[UV34]](s32), [[PTR_ADD3]](p5) :: (store 4 into stack + 12, addrspace 5)
+  ; CHECK:   [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; CHECK:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C20]](s32)
+  ; CHECK:   G_STORE [[UV35]](s32), [[PTR_ADD4]](p5) :: (store 4 into stack + 16, align 16, addrspace 5)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY13]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY15]](s32)
+  ; CHECK:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_12xv3f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 20, implicit-def $scc
+  ; CHECK:   [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
+  ; CHECK:   S_SETPC_B64_return [[COPY19]]
+entry:
+  call void @external_void_func_12xv3f32(
+      <3 x float> <float 0.0, float 0.0, float 0.0>,
+      <3 x float> <float 1.0, float 1.0, float 1.0>,
+      <3 x float> <float 2.0, float 2.0, float 2.0>,
+      <3 x float> <float 3.0, float 3.0, float 3.0>,
+      <3 x float> <float 4.0, float 4.0, float 4.0>,
+      <3 x float> <float 5.0, float 5.0, float 5.0>,
+      <3 x float> <float 6.0, float 6.0, float 6.0>,
+      <3 x float> <float 7.0, float 7.0, float 7.0>,
+      <3 x float> <float 8.0, float 8.0, float 8.0>,
+      <3 x float> <float 9.0, float 9.0, float 9.0>,
+      <3 x float> <float 10.0, float 11.0, float 12.0>,
+      <3 x float> <float 13.0, float 14.0, float 15.0>)
+  ret void
+}
+
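+; Eight <5 x i32> arguments give 40 dwords: 31 in $vgpr0-$vgpr30, the
+; remaining 9 on the stack (36 bytes, matching ADJCALLSTACKDOWN 0, 36).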
+define void @stack_8xv5i32() #0 {
+  ; CHECK-LABEL: name: stack_8xv5i32
+  ; CHECK: bb.1.entry:
+  ; CHECK:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK:   [[BUILD_VECTOR1:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32), [[C2]](s32), [[C2]](s32), [[C2]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; CHECK:   [[BUILD_VECTOR3:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[BUILD_VECTOR4:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C4]](s32), [[C4]](s32), [[C4]](s32), [[C4]](s32), [[C4]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; CHECK:   [[BUILD_VECTOR5:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C5]](s32), [[C5]](s32), [[C5]](s32), [[C5]](s32), [[C5]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+  ; CHECK:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+  ; CHECK:   [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
+  ; CHECK:   [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[BUILD_VECTOR6:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C6]](s32), [[C7]](s32), [[C8]](s32), [[C9]](s32), [[C10]](s32)
+  ; CHECK:   [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
+  ; CHECK:   [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; CHECK:   [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
+  ; CHECK:   [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 14
+  ; CHECK:   [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+  ; CHECK:   [[BUILD_VECTOR7:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C11]](s32), [[C12]](s32), [[C13]](s32), [[C14]](s32), [[C15]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<5 x s32>)
+  ; CHECK:   [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<5 x s32>)
+  ; CHECK:   [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<5 x s32>)
+  ; CHECK:   [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR3]](<5 x s32>)
+  ; CHECK:   [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR4]](<5 x s32>)
+  ; CHECK:   [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR5]](<5 x s32>)
+  ; CHECK:   [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32), [[UV33:%[0-9]+]]:_(s32), [[UV34:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR6]](<5 x s32>)
+  ; CHECK:   [[UV35:%[0-9]+]]:_(s32), [[UV36:%[0-9]+]]:_(s32), [[UV37:%[0-9]+]]:_(s32), [[UV38:%[0-9]+]]:_(s32), [[UV39:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR7]](<5 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_8xv5i32
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(p5) = COPY $sgpr32
+  ; CHECK:   [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C16]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C17]](s32)
+  ; CHECK:   G_STORE [[UV32]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack + 4, addrspace 5)
+  ; CHECK:   [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C18]](s32)
+  ; CHECK:   G_STORE [[UV33]](s32), [[PTR_ADD2]](p5) :: (store 4 into stack + 8, align 8, addrspace 5)
+  ; CHECK:   [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C19]](s32)
+  ; CHECK:   G_STORE [[UV34]](s32), [[PTR_ADD3]](p5) :: (store 4 into stack + 12, addrspace 5)
+  ; CHECK:   [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; CHECK:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C20]](s32)
+  ; CHECK:   G_STORE [[UV35]](s32), [[PTR_ADD4]](p5) :: (store 4 into stack + 16, align 16, addrspace 5)
+  ; CHECK:   [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C21]](s32)
+  ; CHECK:   G_STORE [[UV36]](s32), [[PTR_ADD5]](p5) :: (store 4 into stack + 20, addrspace 5)
+  ; CHECK:   [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+  ; CHECK:   [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C22]](s32)
+  ; CHECK:   G_STORE [[UV37]](s32), [[PTR_ADD6]](p5) :: (store 4 into stack + 24, align 8, addrspace 5)
+  ; CHECK:   [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+  ; CHECK:   [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C23]](s32)
+  ; CHECK:   G_STORE [[UV38]](s32), [[PTR_ADD7]](p5) :: (store 4 into stack + 28, addrspace 5)
+  ; CHECK:   [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+  ; CHECK:   [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C24]](s32)
+  ; CHECK:   G_STORE [[UV39]](s32), [[PTR_ADD8]](p5) :: (store 4 into stack + 32, align 16, addrspace 5)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY13]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY15]](s32)
+  ; CHECK:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_8xv5i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 36, implicit-def $scc
+  ; CHECK:   [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
+  ; CHECK:   S_SETPC_B64_return [[COPY19]]
+entry:
+  call void @external_void_func_8xv5i32(
+      <5 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0>,
+      <5 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1>,
+      <5 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2>,
+      <5 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3>,
+      <5 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4>,
+      <5 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5>,
+      <5 x i32> <i32 6, i32 7, i32 8, i32 9, i32 10>,
+      <5 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15>)
+  ret void
+}
+
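+; Same layout as stack_8xv5i32, but with <5 x float> vectors: 31 dwords in
+; VGPRs, 9 on the stack.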
+define void @stack_8xv5f32() #0 {
+  ; CHECK-LABEL: name: stack_8xv5f32
+  ; CHECK: bb.1.entry:
+  ; CHECK:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr31
+  ; CHECK:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
+  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+  ; CHECK:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK:   [[BUILD_VECTOR1:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; CHECK:   [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32), [[C2]](s32), [[C2]](s32), [[C2]](s32)
+  ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_FCONSTANT float 3.000000e+00
+  ; CHECK:   [[BUILD_VECTOR3:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
+  ; CHECK:   [[C4:%[0-9]+]]:_(s32) = G_FCONSTANT float 4.000000e+00
+  ; CHECK:   [[BUILD_VECTOR4:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C4]](s32), [[C4]](s32), [[C4]](s32), [[C4]](s32), [[C4]](s32)
+  ; CHECK:   [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e+00
+  ; CHECK:   [[BUILD_VECTOR5:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C5]](s32), [[C5]](s32), [[C5]](s32), [[C5]](s32), [[C5]](s32)
+  ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 6.000000e+00
+  ; CHECK:   [[C7:%[0-9]+]]:_(s32) = G_FCONSTANT float 7.000000e+00
+  ; CHECK:   [[C8:%[0-9]+]]:_(s32) = G_FCONSTANT float 8.000000e+00
+  ; CHECK:   [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 9.000000e+00
+  ; CHECK:   [[C10:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+01
+  ; CHECK:   [[BUILD_VECTOR6:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C6]](s32), [[C7]](s32), [[C8]](s32), [[C9]](s32), [[C10]](s32)
+  ; CHECK:   [[C11:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.100000e+01
+  ; CHECK:   [[C12:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.200000e+01
+  ; CHECK:   [[C13:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.300000e+01
+  ; CHECK:   [[C14:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.400000e+01
+  ; CHECK:   [[C15:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.500000e+01
+  ; CHECK:   [[BUILD_VECTOR7:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C11]](s32), [[C12]](s32), [[C13]](s32), [[C14]](s32), [[C15]](s32)
+  ; CHECK:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<5 x s32>)
+  ; CHECK:   [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<5 x s32>)
+  ; CHECK:   [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<5 x s32>)
+  ; CHECK:   [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR3]](<5 x s32>)
+  ; CHECK:   [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR4]](<5 x s32>)
+  ; CHECK:   [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR5]](<5 x s32>)
+  ; CHECK:   [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32), [[UV33:%[0-9]+]]:_(s32), [[UV34:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR6]](<5 x s32>)
+  ; CHECK:   [[UV35:%[0-9]+]]:_(s32), [[UV36:%[0-9]+]]:_(s32), [[UV37:%[0-9]+]]:_(s32), [[UV38:%[0-9]+]]:_(s32), [[UV39:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR7]](<5 x s32>)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_8xv5f32
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   $vgpr0 = COPY [[UV]](s32)
+  ; CHECK:   $vgpr1 = COPY [[UV1]](s32)
+  ; CHECK:   $vgpr2 = COPY [[UV2]](s32)
+  ; CHECK:   $vgpr3 = COPY [[UV3]](s32)
+  ; CHECK:   $vgpr4 = COPY [[UV4]](s32)
+  ; CHECK:   $vgpr5 = COPY [[UV5]](s32)
+  ; CHECK:   $vgpr6 = COPY [[UV6]](s32)
+  ; CHECK:   $vgpr7 = COPY [[UV7]](s32)
+  ; CHECK:   $vgpr8 = COPY [[UV8]](s32)
+  ; CHECK:   $vgpr9 = COPY [[UV9]](s32)
+  ; CHECK:   $vgpr10 = COPY [[UV10]](s32)
+  ; CHECK:   $vgpr11 = COPY [[UV11]](s32)
+  ; CHECK:   $vgpr12 = COPY [[UV12]](s32)
+  ; CHECK:   $vgpr13 = COPY [[UV13]](s32)
+  ; CHECK:   $vgpr14 = COPY [[UV14]](s32)
+  ; CHECK:   $vgpr15 = COPY [[UV15]](s32)
+  ; CHECK:   $vgpr16 = COPY [[UV16]](s32)
+  ; CHECK:   $vgpr17 = COPY [[UV17]](s32)
+  ; CHECK:   $vgpr18 = COPY [[UV18]](s32)
+  ; CHECK:   $vgpr19 = COPY [[UV19]](s32)
+  ; CHECK:   $vgpr20 = COPY [[UV20]](s32)
+  ; CHECK:   $vgpr21 = COPY [[UV21]](s32)
+  ; CHECK:   $vgpr22 = COPY [[UV22]](s32)
+  ; CHECK:   $vgpr23 = COPY [[UV23]](s32)
+  ; CHECK:   $vgpr24 = COPY [[UV24]](s32)
+  ; CHECK:   $vgpr25 = COPY [[UV25]](s32)
+  ; CHECK:   $vgpr26 = COPY [[UV26]](s32)
+  ; CHECK:   $vgpr27 = COPY [[UV27]](s32)
+  ; CHECK:   $vgpr28 = COPY [[UV28]](s32)
+  ; CHECK:   $vgpr29 = COPY [[UV29]](s32)
+  ; CHECK:   $vgpr30 = COPY [[UV30]](s32)
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(p5) = COPY $sgpr32
+  ; CHECK:   [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C16]](s32)
+  ; CHECK:   G_STORE [[UV31]](s32), [[PTR_ADD]](p5) :: (store 4 into stack, align 16, addrspace 5)
+  ; CHECK:   [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; CHECK:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C17]](s32)
+  ; CHECK:   G_STORE [[UV32]](s32), [[PTR_ADD1]](p5) :: (store 4 into stack + 4, addrspace 5)
+  ; CHECK:   [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; CHECK:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C18]](s32)
+  ; CHECK:   G_STORE [[UV33]](s32), [[PTR_ADD2]](p5) :: (store 4 into stack + 8, align 8, addrspace 5)
+  ; CHECK:   [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; CHECK:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C19]](s32)
+  ; CHECK:   G_STORE [[UV34]](s32), [[PTR_ADD3]](p5) :: (store 4 into stack + 12, addrspace 5)
+  ; CHECK:   [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; CHECK:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C20]](s32)
+  ; CHECK:   G_STORE [[UV35]](s32), [[PTR_ADD4]](p5) :: (store 4 into stack + 16, align 16, addrspace 5)
+  ; CHECK:   [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C21]](s32)
+  ; CHECK:   G_STORE [[UV36]](s32), [[PTR_ADD5]](p5) :: (store 4 into stack + 20, addrspace 5)
+  ; CHECK:   [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+  ; CHECK:   [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C22]](s32)
+  ; CHECK:   G_STORE [[UV37]](s32), [[PTR_ADD6]](p5) :: (store 4 into stack + 24, align 8, addrspace 5)
+  ; CHECK:   [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+  ; CHECK:   [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C23]](s32)
+  ; CHECK:   G_STORE [[UV38]](s32), [[PTR_ADD7]](p5) :: (store 4 into stack + 28, addrspace 5)
+  ; CHECK:   [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+  ; CHECK:   [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY17]], [[C24]](s32)
+  ; CHECK:   G_STORE [[UV39]](s32), [[PTR_ADD8]](p5) :: (store 4 into stack + 32, align 16, addrspace 5)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY13]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY15]](s32)
+  ; CHECK:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[GV]](p0), @external_void_func_8xv5f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 36, implicit-def $scc
+  ; CHECK:   [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
+  ; CHECK:   S_SETPC_B64_return [[COPY19]]
+entry:
+  call void @external_void_func_8xv5f32(
+      <5 x float> <float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>,
+      <5 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>,
+      <5 x float> <float 2.0, float 2.0, float 2.0, float 2.0, float 2.0>,
+      <5 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>,
+      <5 x float> <float 4.0, float 4.0, float 4.0, float 4.0, float 4.0>,
+      <5 x float> <float 5.0, float 5.0, float 5.0, float 5.0, float 5.0>,
+      <5 x float> <float 6.0, float 7.0, float 8.0, float 9.0, float 10.0>,
+      <5 x float> <float 11.0, float 12.0, float 13.0, float 14.0, float 15.0>)
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind noinline }