[llvm] r361608 - GlobalISel: support swifterror attribute on AArch64.

Tim Northover via llvm-commits llvm-commits at lists.llvm.org
Fri May 24 01:40:13 PDT 2019


Author: tnorthover
Date: Fri May 24 01:40:13 2019
New Revision: 361608

URL: http://llvm.org/viewvc/llvm-project?rev=361608&view=rev
Log:
GlobalISel: support swifterror attribute on AArch64.

swifterror marks an argument as a register pretending to be a pointer, so we
need a guaranteed mem2reg-like analysis of its uses: every load or store
through the value must become a register copy rather than a memory access.
Fortunately most of the infrastructure (SwiftErrorValueTracking) can be
reused from the SelectionDAG world.

Added:
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/swifterror.ll
Modified:
    llvm/trunk/include/llvm/CodeGen/GlobalISel/CallLowering.h
    llvm/trunk/include/llvm/CodeGen/GlobalISel/IRTranslator.h
    llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp
    llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
    llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
    llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll

Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/CallLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/CallLowering.h?rev=361608&r1=361607&r2=361608&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/CallLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/CallLowering.h Fri May 24 01:40:13 2019
@@ -147,16 +147,39 @@ public:
   CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
   virtual ~CallLowering() = default;
 
+  /// \return true if the target is capable of handling swifterror values that
+  /// have been promoted to a specified register. The extended versions of
+  /// lowerReturn and lowerCall should be implemented.
+  virtual bool supportSwiftError() const {
+    return false;
+  }
+
   /// This hook must be implemented to lower outgoing return values, described
   /// by \p Val, into the specified virtual registers \p VRegs.
   /// This hook is used by GlobalISel.
   ///
+  /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
+  /// that needs to be implicitly returned.
+  ///
   /// \return True if the lowering succeeds, false otherwise.
   virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+                           ArrayRef<unsigned> VRegs,
+                           unsigned SwiftErrorVReg) const {
+    if (!supportSwiftError()) {
+      assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
+      return lowerReturn(MIRBuilder, Val, VRegs);
+    }
+    return false;
+  }
+
+  /// This hook behaves as the extended lowerReturn function, but for targets
+  /// that do not support swifterror value promotion.
+  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
                            ArrayRef<unsigned> VRegs) const {
     return false;
   }
 
+
   /// This hook must be implemented to lower the incoming (formal)
   /// arguments, described by \p Args, for GlobalISel. Each argument
   /// must end up in the related virtual register described by VRegs.
@@ -180,20 +203,31 @@ public:
   /// \p Callee is the destination of the call. It should be either a register,
   /// globaladdress, or externalsymbol.
   ///
-  /// \p ResTy is the type returned by the function
+  /// \p OrigRet is a descriptor for the return type of the function.
   ///
-  /// \p ResReg is the generic virtual register that the returned
-  /// value should be lowered into.
+  /// \p OrigArgs is a list of descriptors of the arguments passed to the
+  /// function.
   ///
-  /// \p ArgTys is a list of the types each member of \p ArgRegs has; used by
-  /// the target to decide which register/stack slot should be allocated.
-  ///
-  /// \p ArgRegs is a list of virtual registers containing each argument that
-  /// needs to be passed.
+  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
+  /// parameter, and contains the vreg that the swifterror should be copied into
+  /// after the call.
   ///
   /// \return true if the lowering succeeded, false otherwise.
   virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
                          const MachineOperand &Callee, const ArgInfo &OrigRet,
+                         ArrayRef<ArgInfo> OrigArgs,
+                         unsigned SwiftErrorVReg) const {
+    if (!supportSwiftError()) {
+      assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror");
+      return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs);
+    }
+    return false;
+  }
+
+  /// This hook behaves as the extended lowerCall function, but for targets that
+  /// do not support swifterror value promotion.
+  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
+                         const MachineOperand &Callee, const ArgInfo &OrigRet,
                          ArrayRef<ArgInfo> OrigArgs) const {
     return false;
   }
@@ -209,6 +243,10 @@ public:
   /// \p ArgRegs is a list of virtual registers containing each argument that
   /// needs to be passed.
   ///
+  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
+  /// parameter, and contains the vreg that the swifterror should be copied into
+  /// after the call.
+  ///
   /// \p GetCalleeReg is a callback to materialize a register for the callee if
   /// the target determines it cannot jump to the destination based purely on \p
   /// CI. This might be because \p CI is indirect, or because of the limited
@@ -217,7 +255,9 @@ public:
   /// \return true if the lowering succeeded, false otherwise.
   bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                  unsigned ResReg, ArrayRef<unsigned> ArgRegs,
+                 unsigned SwiftErrorVReg,
                  std::function<unsigned()> GetCalleeReg) const;
+
 };
 
 } // end namespace llvm
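
For targets that leave supportSwiftError() returning false, the IRTranslator
never produces a non-zero SwiftErrorVReg, so the assertions above hold and the
default implementations simply forward to the old hooks. The extended hooks
only see a non-zero vreg for IR like the following (a minimal sketch, assuming
%swift_error is defined as in the tests):

  declare swiftcc void @callee(%swift_error** swifterror)

  define swiftcc void @f(%swift_error** swifterror %err) {
    ; translateCall copies the live swifterror vreg into the argument and
    ; creates a fresh def for the post-call value, passed as SwiftErrorVReg.
    call swiftcc void @callee(%swift_error** swifterror %err)
    ret void
  }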

Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/IRTranslator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/IRTranslator.h?rev=361608&r1=361607&r2=361608&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/IRTranslator.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/IRTranslator.h Fri May 24 01:40:13 2019
@@ -22,6 +22,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Types.h"
+#include "llvm/CodeGen/SwiftErrorValueTracking.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/Support/Allocator.h"
@@ -163,6 +164,8 @@ private:
   /// this function.
   DenseMap<const AllocaInst *, int> FrameIndices;
 
+  SwiftErrorValueTracking SwiftError;
+
   /// \name Methods for translating from LLVM IR to MachineInstr.
   /// \see ::translate for general information on the translate methods.
   /// @{

Modified: llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp?rev=361608&r1=361607&r2=361608&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp Fri May 24 01:40:13 2019
@@ -26,9 +26,10 @@ using namespace llvm;
 
 void CallLowering::anchor() {}
 
-bool CallLowering::lowerCall(
-    MachineIRBuilder &MIRBuilder, ImmutableCallSite CS, unsigned ResReg,
-    ArrayRef<unsigned> ArgRegs, std::function<unsigned()> GetCalleeReg) const {
+bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+                             unsigned ResReg, ArrayRef<unsigned> ArgRegs,
+                             unsigned SwiftErrorVReg,
+                             std::function<unsigned()> GetCalleeReg) const {
   auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
 
   // First step is to marshall all the function's parameters into the correct
@@ -41,8 +42,8 @@ bool CallLowering::lowerCall(
     ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                     i < NumFixedArgs};
     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
-    // We don't currently support swifterror or swiftself args.
-    if (OrigArg.Flags.isSwiftError() || OrigArg.Flags.isSwiftSelf())
+    // We don't currently support swiftself args.
+    if (OrigArg.Flags.isSwiftSelf())
       return false;
     OrigArgs.push_back(OrigArg);
     ++i;
@@ -58,7 +59,8 @@ bool CallLowering::lowerCall(
   if (!OrigRet.Ty->isVoidTy())
     setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);
 
-  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs);
+  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs,
+                   SwiftErrorVReg);
 }
 
 template <typename FuncInfoTy>

Modified: llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp?rev=361608&r1=361607&r2=361608&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp Fri May 24 01:40:13 2019
@@ -354,11 +354,16 @@ bool IRTranslator::translateRet(const Us
   if (Ret)
     VRegs = getOrCreateVRegs(*Ret);
 
+  unsigned SwiftErrorVReg = 0;
+  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
+    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
+        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
+  }
+
   // The target may mess up with the insertion point, but
   // this is not important as a return is the last instruction
   // of the block anyway.
-
-  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
+  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
 }
 
 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
@@ -447,6 +452,14 @@ bool IRTranslator::translateIndirectBr(c
   return true;
 }
 
+static bool isSwiftError(const Value *V) {
+  if (auto Arg = dyn_cast<Argument>(V))
+    return Arg->hasSwiftErrorAttr();
+  if (auto AI = dyn_cast<AllocaInst>(V))
+    return AI->isSwiftError();
+  return false;
+}
+
 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
   const LoadInst &LI = cast<LoadInst>(U);
 
@@ -464,6 +477,15 @@ bool IRTranslator::translateLoad(const U
   Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
 
+  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
+    assert(Regs.size() == 1 && "swifterror should be single pointer");
+    unsigned VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
+                                                    LI.getPointerOperand());
+    MIRBuilder.buildCopy(Regs[0], VReg);
+    return true;
+  }
+
+
   for (unsigned i = 0; i < Regs.size(); ++i) {
     unsigned Addr = 0;
     MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
@@ -496,6 +518,15 @@ bool IRTranslator::translateStore(const
   Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
 
+  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
+    assert(Vals.size() == 1 && "swifterror should be single pointer");
+
+    unsigned VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
+                                                    SI.getPointerOperand());
+    MIRBuilder.buildCopy(VReg, Vals[0]);
+    return true;
+  }
+
   for (unsigned i = 0; i < Vals.size(); ++i) {
     unsigned Addr = 0;
     MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
@@ -1154,16 +1185,29 @@ bool IRTranslator::translateCall(const U
                                : getOrCreateVReg(CI);
 
     SmallVector<unsigned, 8> Args;
-    for (auto &Arg: CI.arg_operands())
+    unsigned SwiftErrorVReg = 0;
+    for (auto &Arg: CI.arg_operands()) {
+      if (CLI->supportSwiftError() && isSwiftError(Arg)) {
+        LLT Ty = getLLTForType(*Arg->getType(), *DL);
+        unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
+        MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
+                                         &CI, &MIRBuilder.getMBB(), Arg));
+        Args.push_back(InVReg);
+        SwiftErrorVReg =
+            SwiftError.getOrCreateVRegDefAt(&CI, &MIRBuilder.getMBB(), Arg);
+        continue;
+      }
       Args.push_back(packRegs(*Arg, MIRBuilder));
+    }
 
     MF->getFrameInfo().setHasCalls(true);
-    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
-      return getOrCreateVReg(*CI.getCalledValue());
-    });
+    bool Success =
+        CLI->lowerCall(MIRBuilder, &CI, Res, Args, SwiftErrorVReg,
+                       [&]() { return getOrCreateVReg(*CI.getCalledValue()); });
 
     if (IsSplitType)
       unpackRegs(CI, Res, MIRBuilder);
+
     return Success;
   }
 
@@ -1239,10 +1283,23 @@ bool IRTranslator::translateInvoke(const
   if (!I.getType()->isVoidTy())
     Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
   SmallVector<unsigned, 8> Args;
-  for (auto &Arg: I.arg_operands())
+  unsigned SwiftErrorVReg = 0;
+  for (auto &Arg : I.arg_operands()) {
+    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
+      LLT Ty = getLLTForType(*Arg->getType(), *DL);
+      unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
+      MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
+                                       &I, &MIRBuilder.getMBB(), Arg));
+      Args.push_back(InVReg);
+      SwiftErrorVReg =
+          SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
+      continue;
+    }
+
     Args.push_back(packRegs(*Arg, MIRBuilder));
+  }
 
-  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
+  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, SwiftErrorVReg,
                       [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
     return false;
 
@@ -1331,7 +1388,7 @@ bool IRTranslator::translateAlloca(const
   auto &AI = cast<AllocaInst>(U);
 
   if (AI.isSwiftError())
-    return false;
+    return true;
 
   if (AI.isStaticAlloca()) {
     unsigned Res = getOrCreateVReg(AI);
@@ -1776,6 +1833,10 @@ bool IRTranslator::runOnMachineFunction(
   MF->push_back(EntryBB);
   EntryBuilder->setMBB(*EntryBB);
 
+  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
+  SwiftError.setFunction(CurMF);
+  SwiftError.createEntriesInEntryBlock(DbgLoc);
+
   // Create all blocks, in IR order, to preserve the layout.
   for (const BasicBlock &BB: F) {
     auto *&MBB = BBToMBB[&BB];
@@ -1797,14 +1858,18 @@ bool IRTranslator::runOnMachineFunction(
       continue; // Don't handle zero sized types.
     VRegArgs.push_back(
         MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
+
+    if (Arg.hasSwiftErrorAttr())
+      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(),
+                                VRegArgs.back());
   }
 
  // We don't currently support translating swiftself functions.
   for (auto &Arg : F.args()) {
-    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
+    if (Arg.hasSwiftSelfAttr()) {
       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                  F.getSubprogram(), &F.getEntryBlock());
-      R << "unable to lower arguments due to swifterror/swiftself: "
+      R << "unable to lower arguments due to swiftself: "
         << ore::NV("Prototype", F.getType());
       reportTranslationError(*MF, *TPC, *ORE, R);
       return false;
@@ -1880,6 +1945,8 @@ bool IRTranslator::runOnMachineFunction(
 
   finishPendingPhis();
 
+  SwiftError.propagateVRegs();
+
   // Merge the argument lowering and constants block with its single
   // successor, the LLVM-IR entry block.  We want the basic block to
   // be maximal.
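
Taken together, the translateAlloca/translateLoad/translateStore changes mean
a swifterror alloca produces no frame index at all; a pattern like the one
below (a trimmed sketch in the style of the new test, with @foo and
%swift_error as defined there) lowers to nothing but register copies around
the call:

  define float @use_error() {
    %error_ptr_ref = alloca swifterror %swift_error*  ; no stack slot created
    store %swift_error* null, %swift_error** %error_ptr_ref  ; becomes a vreg copy
    %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
    %err = load %swift_error*, %swift_error** %error_ptr_ref ; copy of the post-call value
    ret float %call
  }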

Modified: llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp?rev=361608&r1=361607&r2=361608&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp Fri May 24 01:40:13 2019
@@ -232,7 +232,8 @@ void AArch64CallLowering::splitToValueTy
 
 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                       const Value *Val,
-                                      ArrayRef<unsigned> VRegs) const {
+                                      ArrayRef<unsigned> VRegs,
+                                      unsigned SwiftErrorVReg) const {
   auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
   assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
          "Return value without a vreg");
@@ -340,6 +341,11 @@ bool AArch64CallLowering::lowerReturn(Ma
     Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
   }
 
+  if (SwiftErrorVReg) {
+    MIB.addUse(AArch64::X21, RegState::Implicit);
+    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
+  }
+
   MIRBuilder.insertInstr(MIB);
   return Success;
 }
@@ -420,7 +426,8 @@ bool AArch64CallLowering::lowerCall(Mach
                                     CallingConv::ID CallConv,
                                     const MachineOperand &Callee,
                                     const ArgInfo &OrigRet,
-                                    ArrayRef<ArgInfo> OrigArgs) const {
+                                    ArrayRef<ArgInfo> OrigArgs,
+                                    unsigned SwiftErrorVReg) const {
   MachineFunction &MF = MIRBuilder.getMF();
   const Function &F = MF.getFunction();
   MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -503,6 +510,11 @@ bool AArch64CallLowering::lowerCall(Mach
       MIRBuilder.buildSequence(OrigRet.Reg, SplitRegs, RegOffsets);
   }
 
+  if (SwiftErrorVReg) {
+    MIB.addDef(AArch64::X21, RegState::Implicit);
+    MIRBuilder.buildCopy(SwiftErrorVReg, AArch64::X21);
+  }
+
   CallSeqStart.addImm(Handler.StackSize).addImm(0);
   MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
       .addImm(Handler.StackSize)

Modified: llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h?rev=361608&r1=361607&r2=361608&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h Fri May 24 01:40:13 2019
@@ -34,14 +34,24 @@ public:
   AArch64CallLowering(const AArch64TargetLowering &TLI);
 
   bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
-                   ArrayRef<unsigned> VRegs) const override;
+                   ArrayRef<unsigned> VRegs,
+                   unsigned SwiftErrorVReg) const override;
 
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
                             ArrayRef<unsigned> VRegs) const override;
 
   bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
                  const MachineOperand &Callee, const ArgInfo &OrigRet,
-                 ArrayRef<ArgInfo> OrigArgs) const override;
+                 ArrayRef<ArgInfo> OrigArgs,
+                 unsigned SwiftErrorVReg) const override;
+
+  bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
+                 const MachineOperand &Callee, const ArgInfo &OrigRet,
+                 ArrayRef<ArgInfo> OrigArgs) const override {
+    return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs, 0);
+  }
+
+  bool supportSwiftError() const override { return true; }
 
 private:
   using RegHandler = std::function<void(MachineIRBuilder &, Type *, unsigned,

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll?rev=361608&r1=361607&r2=361608&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll Fri May 24 01:40:13 2019
@@ -230,23 +230,8 @@ define void @nonpow2_vector_add_fewerele
 
 %swift_error = type {i64, i8}
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments due to swifterror/swiftself: void (%swift_error**)* (in function: swifterror_param)
-; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for swifterror_param
-define void @swifterror_param(%swift_error** swifterror %error_ptr_ref) {
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments due to swiftself: void (%swift_error**)* (in function: swiftself_param)
+; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for swiftself_param
+define void @swiftself_param(%swift_error** swiftself %error_ptr_ref) {
   ret void
 }
-
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: alloca: '  %error_ptr_ref = alloca swifterror %swift_error*' (in function: swifterror_alloca)
-; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for swifterror_alloca
-; We can't currently test the call parameters being swifterror because the value
-; must come from a swifterror alloca or parameter, at which point we already
-; fallback. As long as those cases work however we should be fine.
-define void @swifterror_alloca(i8* %error_ref) {
-entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call void @swifterror_param(%swift_error** swifterror %error_ptr_ref)
-  ret void
-}
-
-

Added: llvm/trunk/test/CodeGen/AArch64/GlobalISel/swifterror.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/swifterror.ll?rev=361608&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/swifterror.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/swifterror.ll Fri May 24 01:40:13 2019
@@ -0,0 +1,518 @@
+; RUN: llc -verify-machineinstrs -frame-pointer=all -global-isel < %s -mtriple=aarch64-apple-ios -disable-post-ra | FileCheck %s
+
+declare i8* @malloc(i64)
+declare void @free(i8*)
+%swift_error = type {i64, i8}
+
+; This tests the basic usage of a swifterror parameter. "foo" is the function
+; that takes a swifterror parameter and "caller" is the caller of "foo".
+define float @foo(%swift_error** swifterror %error_ptr_ref) {
+; CHECK-LABEL: foo:
+; CHECK: mov [[ID:w[0-9]+]], #1
+; CHECK: mov x0, #16
+; CHECK: malloc
+; CHECK: strb [[ID]], [x0, #8]
+; CHECK: mov x21, x0
+; CHECK-NOT: x21
+
+entry:
+  %call = call i8* @malloc(i64 16)
+  %call.0 = bitcast i8* %call to %swift_error*
+  store %swift_error* %call.0, %swift_error** %error_ptr_ref
+  %tmp = getelementptr inbounds i8, i8* %call, i64 8
+  store i8 1, i8* %tmp
+  ret float 1.0
+}
+
+; "caller" calls "foo" that takes a swifterror parameter.
+define float @caller(i8* %error_ref) {
+; CHECK-LABEL: caller:
+; CHECK: mov [[ID:x[0-9]+]], x0
+; CHECK: mov [[ZERO:x[0-9]+]], #0
+; CHECK: mov x21, #0
+; CHECK: bl {{.*}}foo
+; CHECK: mov x0, x21
+; CHECK: cmp x21, [[ZERO]]
+; CHECK: b.ne
+; Access part of the error object and save it to error_ref
+; CHECK: ldrb [[CODE:w[0-9]+]], [x0, #8]
+; CHECK: strb [[CODE]], [{{.*}}[[ID]]]
+; CHECK: bl {{.*}}free
+
+entry:
+  %error_ptr_ref = alloca swifterror %swift_error*
+  store %swift_error* null, %swift_error** %error_ptr_ref
+  %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
+  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
+  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
+  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  br i1 %had_error_from_foo, label %handler, label %cont
+cont:
+  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
+  %t = load i8, i8* %v1
+  store i8 %t, i8* %error_ref
+  br label %handler
+handler:
+  call void @free(i8* %tmp)
+  ret float 1.0
+}
+
+; "caller2" is the caller of "foo", it calls "foo" inside a loop.
+define float @caller2(i8* %error_ref) {
+; CHECK-LABEL: caller2:
+; CHECK: mov [[ID:x[0-9]+]], x0
+; CHECK: mov [[ZERO:x[0-9]+]], #0
+; CHECK: fmov [[CMP:s[0-9]+]], #1.0
+; CHECK: mov x21, #0
+; CHECK: bl {{.*}}foo
+; CHECK: cmp x21, [[ZERO]]
+; CHECK: b.ne
+; CHECK: fcmp s0, [[CMP]]
+; CHECK: b.le
+; Access part of the error object and save it to error_ref
+; CHECK: ldrb [[CODE:w[0-9]+]], [x21, #8]
+; CHECK: strb [[CODE]], [{{.*}}[[ID]]]
+; CHECK: mov x0, x21
+; CHECK: bl {{.*}}free
+
+entry:
+  %error_ptr_ref = alloca swifterror %swift_error*
+  br label %bb_loop
+bb_loop:
+  store %swift_error* null, %swift_error** %error_ptr_ref
+  %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
+  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
+  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
+  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  br i1 %had_error_from_foo, label %handler, label %cont
+cont:
+  %cmp = fcmp ogt float %call, 1.000000e+00
+  br i1 %cmp, label %bb_end, label %bb_loop
+bb_end:
+  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
+  %t = load i8, i8* %v1
+  store i8 %t, i8* %error_ref
+  br label %handler
+handler:
+  call void @free(i8* %tmp)
+  ret float 1.0
+}
+
+; "foo_if" is a function that takes a swifterror parameter, it sets swifterror
+; under a certain condition.
+define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) {
+; CHECK-LABEL: foo_if:
+; CHECK: cbz w0
+; CHECK: mov [[ID:w[0-9]+]], #1
+; CHECK: mov x0, #16
+; CHECK: malloc
+; CHECK: strb [[ID]], [x0, #8]
+; CHECK: mov x21, x0
+; CHECK-NOT: x21
+; CHECK: ret
+
+entry:
+  %cond = icmp ne i32 %cc, 0
+  br i1 %cond, label %gen_error, label %normal
+
+gen_error:
+  %call = call i8* @malloc(i64 16)
+  %call.0 = bitcast i8* %call to %swift_error*
+  store %swift_error* %call.0, %swift_error** %error_ptr_ref
+  %tmp = getelementptr inbounds i8, i8* %call, i64 8
+  store i8 1, i8* %tmp
+  ret float 1.0
+
+normal:
+  ret float 0.0
+}
+
+; "foo_loop" is a function that takes a swifterror parameter, it sets swifterror
+; under a certain condition inside a loop.
+define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) {
+; CHECK-LABEL: foo_loop:
+; CHECK: cbz
+; CHECK: mov x0, #16
+; CHECK: malloc
+; CHECK: mov x21, x0
+; CHECK: strb w{{.*}}, [x0, #8]
+; CHECK: fcmp
+; CHECK: b.le
+; CHECK: ret
+
+entry:
+  br label %bb_loop
+
+bb_loop:
+  %cond = icmp ne i32 %cc, 0
+  br i1 %cond, label %gen_error, label %bb_cont
+
+gen_error:
+  %call = call i8* @malloc(i64 16)
+  %call.0 = bitcast i8* %call to %swift_error*
+  store %swift_error* %call.0, %swift_error** %error_ptr_ref
+  %tmp = getelementptr inbounds i8, i8* %call, i64 8
+  store i8 1, i8* %tmp
+  br label %bb_cont
+
+bb_cont:
+  %cmp = fcmp ogt float %cc2, 1.000000e+00
+  br i1 %cmp, label %bb_end, label %bb_loop
+bb_end:
+  ret float 0.0
+}
+
+%struct.S = type { i32, i32, i32, i32, i32, i32 }
+
+; "foo_sret" is a function that takes a swifterror parameter, it also has a sret
+; parameter.
+define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) {
+; CHECK-LABEL: foo_sret:
+; CHECK: mov [[SRET:x[0-9]+]], x8
+; CHECK: mov [[ID:w[0-9]+]], #1
+; CHECK: mov x0, #16
+; CHECK: malloc
+; CHECK: strb [[ID]], [x0, #8]
+; CHECK: str w{{.*}}, [{{.*}}[[SRET]], #4]
+; CHECK: mov x21, x0
+; CHECK-NOT: x21
+
+entry:
+  %call = call i8* @malloc(i64 16)
+  %call.0 = bitcast i8* %call to %swift_error*
+  store %swift_error* %call.0, %swift_error** %error_ptr_ref
+  %tmp = getelementptr inbounds i8, i8* %call, i64 8
+  store i8 1, i8* %tmp
+  %v2 = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
+  store i32 %val1, i32* %v2
+  ret void
+}
+
+; "caller3" calls "foo_sret" that takes a swifterror parameter.
+define float @caller3(i8* %error_ref) {
+; CHECK-LABEL: caller3:
+; CHECK: mov [[ID:x[0-9]+]], x0
+; CHECK: mov [[ZERO:x[0-9]+]], #0
+; CHECK: mov x21, #0
+; CHECK: bl {{.*}}foo_sret
+; CHECK: mov x0, x21
+; CHECK: cmp x21, [[ZERO]]
+; CHECK: b.ne
+; Access part of the error object and save it to error_ref
+; CHECK: ldrb [[CODE:w[0-9]+]], [x0, #8]
+; CHECK: strb [[CODE]], [{{.*}}[[ID]]]
+; CHECK: bl {{.*}}free
+
+entry:
+  %s = alloca %struct.S, align 8
+  %error_ptr_ref = alloca swifterror %swift_error*
+  store %swift_error* null, %swift_error** %error_ptr_ref
+  call void @foo_sret(%struct.S* sret %s, i32 1, %swift_error** swifterror %error_ptr_ref)
+  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
+  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
+  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  br i1 %had_error_from_foo, label %handler, label %cont
+cont:
+  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
+  %t = load i8, i8* %v1
+  store i8 %t, i8* %error_ref
+  br label %handler
+handler:
+  call void @free(i8* %tmp)
+  ret float 1.0
+}
+
+; "foo_vararg" is a function that takes a swifterror parameter, it also has
+; variable number of arguments.
+declare void @llvm.va_start(i8*) nounwind
+define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
+; CHECK-LABEL: foo_vararg:
+; CHECK-DAG: mov [[ID:w[0-9]+]], #1
+; CHECK: mov x0, #16
+; CHECK: malloc
+; CHECK-DAG: strb [[ID]], [x0, #8]
+
+; First vararg
+; CHECK: ldr {{w[0-9]+}}, [x[[ARG1:[0-9]+]]]
+; Second vararg
+; CHECK: mov [[EIGHT:x[0-9]+]], #8
+; CHECK: add x[[ARG2:[0-9]+]], x[[ARG1]], [[EIGHT]]
+; CHECK: ldr {{w[0-9]+}}, [x[[ARG2]]]
+; Third vararg
+; CHECK: add x[[ARG3:[0-9]+]], x[[ARG2]], [[EIGHT]]
+; CHECK: ldr {{w[0-9]+}}, [x[[ARG3]]]
+
+; CHECK: mov x21, x0
+; CHECK-NOT: x21
+entry:
+  %call = call i8* @malloc(i64 16)
+  %call.0 = bitcast i8* %call to %swift_error*
+  store %swift_error* %call.0, %swift_error** %error_ptr_ref
+  %tmp = getelementptr inbounds i8, i8* %call, i64 8
+  store i8 1, i8* %tmp
+
+  %args = alloca i8*, align 8
+  %a10 = alloca i32, align 4
+  %a11 = alloca i32, align 4
+  %a12 = alloca i32, align 4
+  %v10 = bitcast i8** %args to i8*
+  call void @llvm.va_start(i8* %v10)
+  %v11 = va_arg i8** %args, i32
+  store i32 %v11, i32* %a10, align 4
+  %v12 = va_arg i8** %args, i32
+  store i32 %v12, i32* %a11, align 4
+  %v13 = va_arg i8** %args, i32
+  store i32 %v13, i32* %a12, align 4
+
+  ret float 1.0
+}
+
+; "caller4" calls "foo_vararg" that takes a swifterror parameter.
+define float @caller4(i8* %error_ref) {
+; CHECK-LABEL: caller4:
+
+; CHECK: mov [[ID:x[0-9]+]], x0
+; CHECK: mov [[ZERO:x[0-9]+]], #0
+; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp]
+; CHECK: mov x21, #0
+; CHECK: str {{x[0-9]+}}, [sp, #16]
+
+; CHECK: bl {{.*}}foo_vararg
+; CHECK: mov x0, x21
+; CHECK: cmp x21, [[ZERO]]
+; CHECK: b.ne
+; Access part of the error object and save it to error_ref
+; CHECK: ldrb [[CODE:w[0-9]+]], [x0, #8]
+; CHECK: strb [[CODE]], [{{.*}}[[ID]]]
+; CHECK: bl {{.*}}free
+entry:
+  %error_ptr_ref = alloca swifterror %swift_error*
+  store %swift_error* null, %swift_error** %error_ptr_ref
+
+  %a10 = alloca i32, align 4
+  %a11 = alloca i32, align 4
+  %a12 = alloca i32, align 4
+  store i32 10, i32* %a10, align 4
+  store i32 11, i32* %a11, align 4
+  store i32 12, i32* %a12, align 4
+  %v10 = load i32, i32* %a10, align 4
+  %v11 = load i32, i32* %a11, align 4
+  %v12 = load i32, i32* %a12, align 4
+
+  %call = call float (%swift_error**, ...) @foo_vararg(%swift_error** swifterror %error_ptr_ref, i32 %v10, i32 %v11, i32 %v12)
+  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
+  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
+  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  br i1 %had_error_from_foo, label %handler, label %cont
+
+cont:
+  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
+  %t = load i8, i8* %v1
+  store i8 %t, i8* %error_ref
+  br label %handler
+handler:
+  call void @free(i8* %tmp)
+  ret float 1.0
+}
+
+; Check that we don't blow up when tail calling functions with swifterror arguments.
+define float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref) {
+entry:
+  %0 = tail call float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref)
+  ret float %0
+}
+define swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %error_ptr_ref) {
+entry:
+  %0 = tail call swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %error_ptr_ref)
+  ret float %0
+}
+
+; CHECK-LABEL: params_in_reg
+; Save callee-saved registers and swifterror since it will be clobbered by the first call to params_in_reg2.
+; CHECK:  stp     x28, x0, [sp
+; CHECK:  stp     x27, x26, [sp
+; CHECK:  stp     x25, x24, [sp
+; CHECK:  stp     x23, x22, [sp
+; CHECK:  stp     x20, x19, [sp
+; CHECK:  stp     x29, x30, [sp
+; Store argument registers.
+; CHECK:  mov      x20, x1
+; CHECK:  mov      x22, x2
+; CHECK:  mov      x23, x3
+; CHECK:  mov      x24, x4
+; CHECK:  mov      x25, x5
+; CHECK:  mov      x26, x6
+; CHECK:  mov      x27, x7
+; CHECK:  mov      x28, x21
+; Setup call.
+; CHECK:  mov     x8, #0
+; CHECK:  mov     x0, #1
+; CHECK:  mov     x1, #2
+; CHECK:  mov     x2, #3
+; CHECK:  mov     x3, #4
+; CHECK:  mov     x4, #5
+; CHECK:  mov     x5, #6
+; CHECK:  mov     x6, #7
+; CHECK:  mov     x7, #8
+; CHECK:  mov      x21, #0
+; CHECK:  bl      _params_in_reg2
+; Restore original arguments for next call.
+; CHECK:  ldr      x0, [sp
+; CHECK:  mov      x1, x20
+; CHECK:  mov      x2, x22
+; CHECK:  mov      x3, x23
+; CHECK:  mov      x4, x24
+; CHECK:  mov      x5, x25
+; CHECK:  mov      x6, x26
+; CHECK:  mov      x7, x27
+; Restore the original swifterror %err.
+; CHECK:  mov      x21, x28
+; CHECK:  bl      _params_in_reg2
+; Restore callee-saved registers but don't clobber swifterror x21.
+; CHECK-NOT: x21
+; CHECK:  ldp     x29, x30, [sp
+; CHECK-NOT: x21
+; CHECK:  ldp     x20, x19, [sp
+; CHECK-NOT: x21
+; CHECK:  ldp     x23, x22, [sp
+; CHECK-NOT: x21
+; CHECK:  ldp     x25, x24, [sp
+; CHECK-NOT: x21
+; CHECK:  ldp     x27, x26, [sp
+; CHECK-NOT: x21
+; CHECK:  ldr     x28, [sp
+; CHECK-NOT: x21
+; CHECK:  ret
+define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8*, %swift_error** nocapture swifterror %err) {
+  %error_ptr_ref = alloca swifterror %swift_error*, align 8
+  store %swift_error* null, %swift_error** %error_ptr_ref
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8*  null, %swift_error** nocapture swifterror %error_ptr_ref)
+  call swiftcc void @params_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i8*  %8, %swift_error** nocapture swifterror %err)
+  ret void
+}
+declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* , %swift_error** nocapture swifterror %err)
+
+; CHECK-LABEL: params_and_return_in_reg
+; Store callee-saved registers.
+; CHECK:  stp     x28, x0, [sp, #16
+; CHECK:  stp     x27, x26, [sp
+; CHECK:  stp     x25, x24, [sp
+; CHECK:  stp     x23, x22, [sp
+; CHECK:  stp     x20, x19, [sp
+; CHECK:  stp     x29, x30, [sp
+; Save original arguments.
+; CHECK:  mov      x20, x1
+; CHECK:  mov      x22, x2
+; CHECK:  mov      x23, x3
+; CHECK:  mov      x24, x4
+; CHECK:  mov      x25, x5
+; CHECK:  mov      x26, x6
+; CHECK:  mov      x27, x7
+; CHECK:  mov      x28, x21
+; Setup call arguments.
+; CHECK:  mov     x0, #1
+; CHECK:  mov     x1, #2
+; CHECK:  mov     x2, #3
+; CHECK:  mov     x3, #4
+; CHECK:  mov     x4, #5
+; CHECK:  mov     x5, #6
+; CHECK:  mov     x6, #7
+; CHECK:  mov     x7, #8
+; CHECK:  mov      x21, #0
+; CHECK:  bl      _params_in_reg2
+; Store swifterror %error_ptr_ref.
+; CHECK:  stp     {{x[0-9]+}}, x21, [sp]
+; Setup call arguments from original arguments.
+; CHECK:  ldr      x0, [sp, #24
+; CHECK:  mov      x1, x20
+; CHECK:  mov      x2, x22
+; CHECK:  mov      x3, x23
+; CHECK:  mov      x4, x24
+; CHECK:  mov      x5, x25
+; CHECK:  mov      x6, x26
+; CHECK:  mov      x7, x27
+; CHECK:  mov      x21, x28
+; CHECK:  bl      _params_and_return_in_reg2
+; Store return values.
+; CHECK:  mov      x20, x0
+; CHECK:  mov      x22, x1
+; CHECK:  mov      x23, x2
+; CHECK:  mov      x24, x3
+; CHECK:  mov      x25, x4
+; CHECK:  mov      x26, x5
+; CHECK:  mov      x27, x6
+; CHECK:  mov      x28, x7
+; Save swifterror %err.
+; CHECK:  mov      x19, x21
+; Setup call.
+; CHECK:  mov     x0, #1
+; CHECK:  mov     x1, #2
+; CHECK:  mov     x2, #3
+; CHECK:  mov     x3, #4
+; CHECK:  mov     x4, #5
+; CHECK:  mov     x5, #6
+; CHECK:  mov     x6, #7
+; CHECK:  mov     x7, #8
+; ... set up the call with swifterror %error_ptr_ref.
+; CHECK:  ldr     x21, [sp, #8]
+; CHECK:  bl      _params_in_reg2
+; Restore return values for return from this function.
+; CHECK:  mov      x0, x20
+; CHECK:  mov      x1, x22
+; CHECK:  mov      x2, x23
+; CHECK:  mov      x3, x24
+; CHECK:  mov      x4, x25
+; CHECK:  mov      x5, x26
+; CHECK:  mov      x6, x27
+; CHECK:  mov      x7, x28
+; CHECK:  mov      x21, x19
+; Restore callee-saved registers.
+; CHECK:  ldp     x29, x30, [sp
+; CHECK:  ldp     x20, x19, [sp
+; CHECK:  ldp     x23, x22, [sp
+; CHECK:  ldp     x25, x24, [sp
+; CHECK:  ldp     x27, x26, [sp
+; CHECK:  ldr     x28, [sp
+; CHECK:  ret
+define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* , %swift_error** nocapture swifterror %err) {
+  %error_ptr_ref = alloca swifterror %swift_error*, align 8
+  store %swift_error* null, %swift_error** %error_ptr_ref
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8*  null, %swift_error** nocapture swifterror %error_ptr_ref)
+  %val = call swiftcc  { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i8*  %8, %swift_error** nocapture swifterror %err)
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8*  null, %swift_error** nocapture swifterror %error_ptr_ref)
+  ret { i64, i64, i64, i64, i64, i64, i64, i64 } %val
+}
+
+declare swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* , %swift_error** nocapture swifterror %err)
+
+declare void @acallee(i8*)
+
+; Make sure we don't tail call if the caller returns a swifterror value: we
+; would have to move the value into the swifterror register before the tail call.
+; CHECK-LABEL: tailcall_from_swifterror:
+; CHECK-NOT: b _acallee
+; CHECK: bl _acallee
+
+define swiftcc void @tailcall_from_swifterror(%swift_error** swifterror %error_ptr_ref) {
+entry:
+  tail call void @acallee(i8* null)
+  ret void
+}
+
+declare swiftcc void @foo2(%swift_error** swifterror)
+; CHECK-LABEL: testAssign
+; CHECK: mov      x21, #0
+; CHECK: bl      _foo2
+; CHECK: mov      x0, x21
+
+define swiftcc %swift_error* @testAssign(i8* %error_ref) {
+entry:
+  %error_ptr = alloca swifterror %swift_error*
+  store %swift_error* null, %swift_error** %error_ptr
+  call swiftcc void @foo2(%swift_error** swifterror %error_ptr)
+  br label %a
+
+a:
+  %error = load %swift_error*, %swift_error** %error_ptr
+  ret %swift_error* %error
+}



