[llvm-branch-commits] [llvm] f784be0 - [VE] Support SJLJ exception related instructions

Kazushi Marukawa via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Jan 5 03:23:59 PST 2021


Author: Kazushi (Jam) Marukawa
Date: 2021-01-05T20:19:15+09:00
New Revision: f784be0777f34a5b3bc3da6892ca242de7840fce

URL: https://github.com/llvm/llvm-project/commit/f784be0777f34a5b3bc3da6892ca242de7840fce
DIFF: https://github.com/llvm/llvm-project/commit/f784be0777f34a5b3bc3da6892ca242de7840fce.diff

LOG: [VE] Support SJLJ exception related instructions

Support EH_SJLJ_LONGJMP, EH_SJLJ_SETJMP, and EH_SJLJ_SETUP_DISPATCH
for SjLj exception handling.  NC++ uses SjLj exception handling, so
we implement it first.  Also add regression tests.

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D94071

Added: 
    llvm/lib/Target/VE/VEInstrBuilder.h
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll
    llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll

Modified: 
    llvm/lib/Target/VE/VEISelLowering.cpp
    llvm/lib/Target/VE/VEISelLowering.h
    llvm/lib/Target/VE/VEInstrInfo.td

Removed: 
    llvm/test/CodeGen/VE/Scalar/sjlj_except.ll


################################################################################
diff  --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index e83483dd13fb..230ce42d46b3 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -13,6 +13,7 @@
 
 #include "VEISelLowering.h"
 #include "MCTargetDesc/VEMCExpr.h"
+#include "VEInstrBuilder.h"
 #include "VEMachineFunctionInfo.h"
 #include "VERegisterInfo.h"
 #include "VETargetMachine.h"
@@ -276,6 +277,14 @@ void VETargetLowering::initSPUActions() {
 
   /// } Atomic instructions
 
+  /// SJLJ instructions {
+  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
+  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
+  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
+    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
+  /// } SJLJ instructions
+
   // Intrinsic instructions
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
 }
@@ -864,6 +873,9 @@ const char *VETargetLowering::getTargetNodeName(unsigned Opcode) const {
   case VEISD::FIRST_NUMBER:
     break;
     TARGET_NODE_CASE(CALL)
+    TARGET_NODE_CASE(EH_SJLJ_LONGJMP)
+    TARGET_NODE_CASE(EH_SJLJ_SETJMP)
+    TARGET_NODE_CASE(EH_SJLJ_SETUP_DISPATCH)
     TARGET_NODE_CASE(GETFUNPLT)
     TARGET_NODE_CASE(GETSTACKTOP)
     TARGET_NODE_CASE(GETTLSADDR)
@@ -1487,6 +1499,28 @@ SDValue VETargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
   return DAG.getMergeValues(Ops, DL);
 }
 
+SDValue VETargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  return DAG.getNode(VEISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0),
+                     Op.getOperand(1));
+}
+
+SDValue VETargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  return DAG.getNode(VEISD::EH_SJLJ_SETJMP, DL,
+                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
+                     Op.getOperand(1));
+}
+
+SDValue VETargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
+                                                      SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  return DAG.getNode(VEISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
+                     Op.getOperand(0));
+}
+
 static SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                               const VETargetLowering &TLI,
                               const VESubtarget *Subtarget) {
@@ -1599,6 +1633,12 @@ SDValue VETargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     return lowerConstantPool(Op, DAG);
   case ISD::DYNAMIC_STACKALLOC:
     return lowerDYNAMIC_STACKALLOC(Op, DAG);
+  case ISD::EH_SJLJ_LONGJMP:
+    return lowerEH_SJLJ_LONGJMP(Op, DAG);
+  case ISD::EH_SJLJ_SETJMP:
+    return lowerEH_SJLJ_SETJMP(Op, DAG);
+  case ISD::EH_SJLJ_SETUP_DISPATCH:
+    return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
   case ISD::FRAMEADDR:
     return lowerFRAMEADDR(Op, DAG, *this, Subtarget);
   case ISD::GlobalAddress:
@@ -1699,6 +1739,677 @@ SDValue VETargetLowering::getPICJumpTableRelocBase(SDValue Table,
   return DAG.getNode(ISD::ADD, DL, PtrTy, GlobalBase, HiLo);
 }
 
+Register VETargetLowering::prepareMBB(MachineBasicBlock &MBB,
+                                      MachineBasicBlock::iterator I,
+                                      MachineBasicBlock *TargetBB,
+                                      const DebugLoc &DL) const {
+  MachineFunction *MF = MBB.getParent();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  const VEInstrInfo *TII = Subtarget->getInstrInfo();
+
+  const TargetRegisterClass *RC = &VE::I64RegClass;
+  Register Tmp1 = MRI.createVirtualRegister(RC);
+  Register Tmp2 = MRI.createVirtualRegister(RC);
+  Register Result = MRI.createVirtualRegister(RC);
+
+  if (isPositionIndependent()) {
+    // Create following instructions for local linkage PIC code.
+    //     lea %Tmp1, TargetBB@gotoff_lo
+    //     and %Tmp2, %Tmp1, (32)0
+    //     lea.sl %Result, TargetBB@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
+    BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
+        .addImm(0)
+        .addImm(0)
+        .addMBB(TargetBB, VEMCExpr::VK_VE_GOTOFF_LO32);
+    BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
+        .addReg(Tmp1, getKillRegState(true))
+        .addImm(M0(32));
+    BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Result)
+        .addReg(VE::SX15)
+        .addReg(Tmp2, getKillRegState(true))
+        .addMBB(TargetBB, VEMCExpr::VK_VE_GOTOFF_HI32);
+  } else {
+    // Create following instructions for non-PIC code.
+    //     lea     %Tmp1, TargetBB@lo
+    //     and     %Tmp2, %Tmp1, (32)0
+    //     lea.sl  %Result, TargetBB@hi(%Tmp2)
+    BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
+        .addImm(0)
+        .addImm(0)
+        .addMBB(TargetBB, VEMCExpr::VK_VE_LO32);
+    BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
+        .addReg(Tmp1, getKillRegState(true))
+        .addImm(M0(32));
+    BuildMI(MBB, I, DL, TII->get(VE::LEASLrii), Result)
+        .addReg(Tmp2, getKillRegState(true))
+        .addImm(0)
+        .addMBB(TargetBB, VEMCExpr::VK_VE_HI32);
+  }
+  return Result;
+}
+
+Register VETargetLowering::prepareSymbol(MachineBasicBlock &MBB,
+                                         MachineBasicBlock::iterator I,
+                                         StringRef Symbol, const DebugLoc &DL,
+                                         bool IsLocal = false,
+                                         bool IsCall = false) const {
+  MachineFunction *MF = MBB.getParent();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  const VEInstrInfo *TII = Subtarget->getInstrInfo();
+
+  const TargetRegisterClass *RC = &VE::I64RegClass;
+  Register Result = MRI.createVirtualRegister(RC);
+
+  if (isPositionIndependent()) {
+    if (IsCall && !IsLocal) {
+      // Create following instructions for non-local linkage PIC code function
+      // calls.  These instructions use IC and the magic number -24, so we
+      // expand them in VEAsmPrinter.cpp from the GETFUNPLT pseudo instruction.
+      //     lea %Reg, Symbol@plt_lo(-24)
+      //     and %Reg, %Reg, (32)0
+      //     sic %s16
+      //     lea.sl %Result, Symbol@plt_hi(%Reg, %s16) ; %s16 is PLT
+      BuildMI(MBB, I, DL, TII->get(VE::GETFUNPLT), Result)
+          .addExternalSymbol("abort");
+    } else if (IsLocal) {
+      Register Tmp1 = MRI.createVirtualRegister(RC);
+      Register Tmp2 = MRI.createVirtualRegister(RC);
+      // Create following instructions for local linkage PIC code.
+      //     lea %Tmp1, Symbol@gotoff_lo
+      //     and %Tmp2, %Tmp1, (32)0
+      //     lea.sl %Result, Symbol@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
+      BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
+          .addImm(0)
+          .addImm(0)
+          .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOTOFF_LO32);
+      BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
+          .addReg(Tmp1, getKillRegState(true))
+          .addImm(M0(32));
+      BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Result)
+          .addReg(VE::SX15)
+          .addReg(Tmp2, getKillRegState(true))
+          .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOTOFF_HI32);
+    } else {
+      Register Tmp1 = MRI.createVirtualRegister(RC);
+      Register Tmp2 = MRI.createVirtualRegister(RC);
+      // Create following instructions for non-local linkage PIC code.
+      //     lea %Tmp1, Symbol@got_lo
+      //     and %Tmp2, %Tmp1, (32)0
+      //     lea.sl %Tmp3, Symbol@got_hi(%Tmp2, %s15) ; %s15 is GOT
+      //     ld %Result, 0(%Tmp3)
+      Register Tmp3 = MRI.createVirtualRegister(RC);
+      BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
+          .addImm(0)
+          .addImm(0)
+          .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOT_LO32);
+      BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
+          .addReg(Tmp1, getKillRegState(true))
+          .addImm(M0(32));
+      BuildMI(MBB, I, DL, TII->get(VE::LEASLrri), Tmp3)
+          .addReg(VE::SX15)
+          .addReg(Tmp2, getKillRegState(true))
+          .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_GOT_HI32);
+      BuildMI(MBB, I, DL, TII->get(VE::LDrii), Result)
+          .addReg(Tmp3, getKillRegState(true))
+          .addImm(0)
+          .addImm(0);
+    }
+  } else {
+    Register Tmp1 = MRI.createVirtualRegister(RC);
+    Register Tmp2 = MRI.createVirtualRegister(RC);
+    // Create following instructions for non-PIC code.
+    //     lea     %Tmp1, Symbol@lo
+    //     and     %Tmp2, %Tmp1, (32)0
+    //     lea.sl  %Result, Symbol@hi(%Tmp2)
+    BuildMI(MBB, I, DL, TII->get(VE::LEAzii), Tmp1)
+        .addImm(0)
+        .addImm(0)
+        .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_LO32);
+    BuildMI(MBB, I, DL, TII->get(VE::ANDrm), Tmp2)
+        .addReg(Tmp1, getKillRegState(true))
+        .addImm(M0(32));
+    BuildMI(MBB, I, DL, TII->get(VE::LEASLrii), Result)
+        .addReg(Tmp2, getKillRegState(true))
+        .addImm(0)
+        .addExternalSymbol(Symbol.data(), VEMCExpr::VK_VE_HI32);
+  }
+  return Result;
+}
+
+void VETargetLowering::setupEntryBlockForSjLj(MachineInstr &MI,
+                                              MachineBasicBlock *MBB,
+                                              MachineBasicBlock *DispatchBB,
+                                              int FI, int Offset) const {
+  DebugLoc DL = MI.getDebugLoc();
+  const VEInstrInfo *TII = Subtarget->getInstrInfo();
+
+  Register LabelReg =
+      prepareMBB(*MBB, MachineBasicBlock::iterator(MI), DispatchBB, DL);
+
+  // Store the address of DispatchBB into the given jmpbuf[1], which holds the
+  // next IC referenced by longjmp (throw) later.
+  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
+  addFrameReference(MIB, FI, Offset); // jmpbuf[1]
+  MIB.addReg(LabelReg, getKillRegState(true));
+}
+
+MachineBasicBlock *
+VETargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
+                                   MachineBasicBlock *MBB) const {
+  DebugLoc DL = MI.getDebugLoc();
+  MachineFunction *MF = MBB->getParent();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+
+  const BasicBlock *BB = MBB->getBasicBlock();
+  MachineFunction::iterator I = ++MBB->getIterator();
+
+  // Memory Reference.
+  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+                                           MI.memoperands_end());
+  Register BufReg = MI.getOperand(1).getReg();
+
+  Register DstReg;
+
+  DstReg = MI.getOperand(0).getReg();
+  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
+  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
+  (void)TRI;
+  Register MainDestReg = MRI.createVirtualRegister(RC);
+  Register RestoreDestReg = MRI.createVirtualRegister(RC);
+
+  // For `v = call @llvm.eh.sjlj.setjmp(buf)`, we generate following
+  // instructions.  SP/FP must be saved in jmpbuf before `llvm.eh.sjlj.setjmp`.
+  //
+  // ThisMBB:
+  //   buf[3] = %s17 iff %s17 is used as BP
+  //   buf[1] = RestoreMBB as IC after longjmp
+  //   # SjLjSetup RestoreMBB
+  //
+  // MainMBB:
+  //   v_main = 0
+  //
+  // SinkMBB:
+  //   v = phi(v_main, MainMBB, v_restore, RestoreMBB)
+  //   ...
+  //
+  // RestoreMBB:
+  //   %s17 = buf[3] = iff %s17 is used as BP
+  //   v_restore = 1
+  //   goto SinkMBB
+
+  MachineBasicBlock *ThisMBB = MBB;
+  MachineBasicBlock *MainMBB = MF->CreateMachineBasicBlock(BB);
+  MachineBasicBlock *SinkMBB = MF->CreateMachineBasicBlock(BB);
+  MachineBasicBlock *RestoreMBB = MF->CreateMachineBasicBlock(BB);
+  MF->insert(I, MainMBB);
+  MF->insert(I, SinkMBB);
+  MF->push_back(RestoreMBB);
+  RestoreMBB->setHasAddressTaken();
+
+  // Transfer the remainder of BB and its successor edges to SinkMBB.
+  SinkMBB->splice(SinkMBB->begin(), MBB,
+                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
+  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+  // ThisMBB:
+  Register LabelReg =
+      prepareMBB(*MBB, MachineBasicBlock::iterator(MI), RestoreMBB, DL);
+
+  // Store BP in buf[3] iff this function is using BP.
+  const VEFrameLowering *TFI = Subtarget->getFrameLowering();
+  if (TFI->hasBP(*MF)) {
+    MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
+    MIB.addReg(BufReg);
+    MIB.addImm(0);
+    MIB.addImm(24);
+    MIB.addReg(VE::SX17);
+    MIB.setMemRefs(MMOs);
+  }
+
+  // Store IP in buf[1].
+  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(VE::STrii));
+  MIB.add(MI.getOperand(1)); // we can preserve the kill flags here.
+  MIB.addImm(0);
+  MIB.addImm(8);
+  MIB.addReg(LabelReg, getKillRegState(true));
+  MIB.setMemRefs(MMOs);
+
+  // SP/FP are already stored in jmpbuf before `llvm.eh.sjlj.setjmp`.
+
+  // Insert setup.
+  MIB =
+      BuildMI(*ThisMBB, MI, DL, TII->get(VE::EH_SjLj_Setup)).addMBB(RestoreMBB);
+
+  const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+  MIB.addRegMask(RegInfo->getNoPreservedMask());
+  ThisMBB->addSuccessor(MainMBB);
+  ThisMBB->addSuccessor(RestoreMBB);
+
+  // MainMBB:
+  BuildMI(MainMBB, DL, TII->get(VE::LEAzii), MainDestReg)
+      .addImm(0)
+      .addImm(0)
+      .addImm(0);
+  MainMBB->addSuccessor(SinkMBB);
+
+  // SinkMBB:
+  BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(VE::PHI), DstReg)
+      .addReg(MainDestReg)
+      .addMBB(MainMBB)
+      .addReg(RestoreDestReg)
+      .addMBB(RestoreMBB);
+
+  // RestoreMBB:
+  // Restore BP from buf[3] iff this function is using BP.  The address of
+  // buf is in SX10.
+  // FIXME: Better to not use SX10 here
+  if (TFI->hasBP(*MF)) {
+    MachineInstrBuilder MIB =
+        BuildMI(RestoreMBB, DL, TII->get(VE::LDrii), VE::SX17);
+    MIB.addReg(VE::SX10);
+    MIB.addImm(0);
+    MIB.addImm(24);
+    MIB.setMemRefs(MMOs);
+  }
+  BuildMI(RestoreMBB, DL, TII->get(VE::LEAzii), RestoreDestReg)
+      .addImm(0)
+      .addImm(0)
+      .addImm(1);
+  BuildMI(RestoreMBB, DL, TII->get(VE::BRCFLa_t)).addMBB(SinkMBB);
+  RestoreMBB->addSuccessor(SinkMBB);
+
+  MI.eraseFromParent();
+  return SinkMBB;
+}
+
+MachineBasicBlock *
+VETargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
+                                    MachineBasicBlock *MBB) const {
+  DebugLoc DL = MI.getDebugLoc();
+  MachineFunction *MF = MBB->getParent();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+
+  // Memory Reference.
+  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+                                           MI.memoperands_end());
+  Register BufReg = MI.getOperand(0).getReg();
+
+  Register Tmp = MRI.createVirtualRegister(&VE::I64RegClass);
+  // Since FP is only updated here but NOT referenced, it's treated as GPR.
+  Register FP = VE::SX9;
+  Register SP = VE::SX11;
+
+  MachineInstrBuilder MIB;
+
+  MachineBasicBlock *ThisMBB = MBB;
+
+  // For `call @llvm.eh.sjlj.longjmp(buf)`, we generate following instructions.
+  //
+  // ThisMBB:
+  //   %fp = load buf[0]
+  //   %jmp = load buf[1]
+  //   %s10 = buf        ; Store an address of buf to SX10 for RestoreMBB
+  //   %sp = load buf[2] ; generated by llvm.eh.sjlj.setjmp.
+  //   jmp %jmp
+
+  // Reload FP.
+  MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), FP);
+  MIB.addReg(BufReg);
+  MIB.addImm(0);
+  MIB.addImm(0);
+  MIB.setMemRefs(MMOs);
+
+  // Reload IP.
+  MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), Tmp);
+  MIB.addReg(BufReg);
+  MIB.addImm(0);
+  MIB.addImm(8);
+  MIB.setMemRefs(MMOs);
+
+  // Copy BufReg to SX10 for later use in setjmp.
+  // FIXME: Better to not use SX10 here
+  BuildMI(*ThisMBB, MI, DL, TII->get(VE::ORri), VE::SX10)
+      .addReg(BufReg)
+      .addImm(0);
+
+  // Reload SP.
+  MIB = BuildMI(*ThisMBB, MI, DL, TII->get(VE::LDrii), SP);
+  MIB.add(MI.getOperand(0)); // we can preserve the kill flags here.
+  MIB.addImm(0);
+  MIB.addImm(16);
+  MIB.setMemRefs(MMOs);
+
+  // Jump.
+  BuildMI(*ThisMBB, MI, DL, TII->get(VE::BCFLari_t))
+      .addReg(Tmp, getKillRegState(true))
+      .addImm(0);
+
+  MI.eraseFromParent();
+  return ThisMBB;
+}
+
+MachineBasicBlock *
+VETargetLowering::emitSjLjDispatchBlock(MachineInstr &MI,
+                                        MachineBasicBlock *BB) const {
+  DebugLoc DL = MI.getDebugLoc();
+  MachineFunction *MF = BB->getParent();
+  MachineFrameInfo &MFI = MF->getFrameInfo();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  const VEInstrInfo *TII = Subtarget->getInstrInfo();
+  int FI = MFI.getFunctionContextIndex();
+
+  // Get a mapping of the call site numbers to all of the landing pads they're
+  // associated with.
+  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
+  unsigned MaxCSNum = 0;
+  for (auto &MBB : *MF) {
+    if (!MBB.isEHPad())
+      continue;
+
+    MCSymbol *Sym = nullptr;
+    for (const auto &MI : MBB) {
+      if (MI.isDebugInstr())
+        continue;
+
+      assert(MI.isEHLabel() && "expected EH_LABEL");
+      Sym = MI.getOperand(0).getMCSymbol();
+      break;
+    }
+
+    if (!MF->hasCallSiteLandingPad(Sym))
+      continue;
+
+    for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
+      CallSiteNumToLPad[CSI].push_back(&MBB);
+      MaxCSNum = std::max(MaxCSNum, CSI);
+    }
+  }
+
+  // Get an ordered list of the machine basic blocks for the jump table.
+  std::vector<MachineBasicBlock *> LPadList;
+  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
+  LPadList.reserve(CallSiteNumToLPad.size());
+
+  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
+    for (auto &LP : CallSiteNumToLPad[CSI]) {
+      LPadList.push_back(LP);
+      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
+    }
+  }
+
+  assert(!LPadList.empty() &&
+         "No landing pad destinations for the dispatch jump table!");
+
+  // The %fn_context is allocated like below (from --print-after=sjljehprepare):
+  //   %fn_context = alloca { i8*, i64, [4 x i64], i8*, i8*, [5 x i8*] }
+  //
+  // This `[5 x i8*]` is jmpbuf, so jmpbuf[1] is FI+72.
+  // First `i64` is callsite, so callsite is FI+8.
+  static const int OffsetIC = 72;
+  static const int OffsetCS = 8;
+
+  // Create the MBBs for the dispatch code like following:
+  //
+  // ThisMBB:
+  //   Prepare DispatchBB address and store it to buf[1].
+  //   ...
+  //
+  // DispatchBB:
+  //   %s15 = GETGOT iff isPositionIndependent
+  //   %callsite = load callsite
+  //   brgt.l.t #size of callsites, %callsite, DispContBB
+  //
+  // TrapBB:
+  //   Call abort.
+  //
+  // DispContBB:
+  //   %breg = address of jump table
+  //   %pc = load and calculate next pc from %breg and %callsite
+  //   jmp %pc
+
+  // Shove the dispatch's address into the return slot in the function context.
+  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
+  DispatchBB->setIsEHPad(true);
+
+  // TrapBB will cause a trap like `assert(0)`.
+  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
+  DispatchBB->addSuccessor(TrapBB);
+
+  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
+  DispatchBB->addSuccessor(DispContBB);
+
+  // Insert MBBs.
+  MF->push_back(DispatchBB);
+  MF->push_back(DispContBB);
+  MF->push_back(TrapBB);
+
+  // Insert code to call abort in the TrapBB.
+  Register Abort = prepareSymbol(*TrapBB, TrapBB->end(), "abort", DL,
+                                 /* Local */ false, /* Call */ true);
+  BuildMI(TrapBB, DL, TII->get(VE::BSICrii), VE::SX10)
+      .addReg(Abort, getKillRegState(true))
+      .addImm(0)
+      .addImm(0);
+
+  // Insert code into the entry block that creates and registers the function
+  // context.
+  setupEntryBlockForSjLj(MI, BB, DispatchBB, FI, OffsetIC);
+
+  // Create the jump table and associated information
+  unsigned JTE = getJumpTableEncoding();
+  MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
+  unsigned MJTI = JTI->createJumpTableIndex(LPadList);
+
+  const VERegisterInfo &RI = TII->getRegisterInfo();
+  // Add a register mask with no preserved registers.  This results in all
+  // registers being marked as clobbered.
+  BuildMI(DispatchBB, DL, TII->get(VE::NOP))
+      .addRegMask(RI.getNoPreservedMask());
+
+  if (isPositionIndependent()) {
+    // Force to generate GETGOT, since current implementation doesn't store GOT
+    // register.
+    BuildMI(DispatchBB, DL, TII->get(VE::GETGOT), VE::SX15);
+  }
+
+  // IReg is used as an index in a memory operand and therefore can't be SP
+  const TargetRegisterClass *RC = &VE::I64RegClass;
+  Register IReg = MRI.createVirtualRegister(RC);
+  addFrameReference(BuildMI(DispatchBB, DL, TII->get(VE::LDLZXrii), IReg), FI,
+                    OffsetCS);
+  if (LPadList.size() < 64) {
+    BuildMI(DispatchBB, DL, TII->get(VE::BRCFLir_t))
+        .addImm(VECC::CC_ILE)
+        .addImm(LPadList.size())
+        .addReg(IReg)
+        .addMBB(TrapBB);
+  } else {
+    assert(LPadList.size() <= 0x7FFFFFFF && "Too large Landing Pad!");
+    Register TmpReg = MRI.createVirtualRegister(RC);
+    BuildMI(DispatchBB, DL, TII->get(VE::LEAzii), TmpReg)
+        .addImm(0)
+        .addImm(0)
+        .addImm(LPadList.size());
+    BuildMI(DispatchBB, DL, TII->get(VE::BRCFLrr_t))
+        .addImm(VECC::CC_ILE)
+        .addReg(TmpReg, getKillRegState(true))
+        .addReg(IReg)
+        .addMBB(TrapBB);
+  }
+
+  Register BReg = MRI.createVirtualRegister(RC);
+  Register Tmp1 = MRI.createVirtualRegister(RC);
+  Register Tmp2 = MRI.createVirtualRegister(RC);
+
+  if (isPositionIndependent()) {
+    // Create following instructions for local linkage PIC code.
+    //     lea    %Tmp1, .LJTI0_0@gotoff_lo
+    //     and    %Tmp2, %Tmp1, (32)0
+    //     lea.sl %BReg, .LJTI0_0@gotoff_hi(%Tmp2, %s15) ; %s15 is GOT
+    BuildMI(DispContBB, DL, TII->get(VE::LEAzii), Tmp1)
+        .addImm(0)
+        .addImm(0)
+        .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_GOTOFF_LO32);
+    BuildMI(DispContBB, DL, TII->get(VE::ANDrm), Tmp2)
+        .addReg(Tmp1, getKillRegState(true))
+        .addImm(M0(32));
+    BuildMI(DispContBB, DL, TII->get(VE::LEASLrri), BReg)
+        .addReg(VE::SX15)
+        .addReg(Tmp2, getKillRegState(true))
+        .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_GOTOFF_HI32);
+  } else {
+    // Create following instructions for non-PIC code.
+    //     lea     %Tmp1, .LJTI0_0@lo
+    //     and     %Tmp2, %Tmp1, (32)0
+    //     lea.sl  %BReg, .LJTI0_0@hi(%Tmp2)
+    BuildMI(DispContBB, DL, TII->get(VE::LEAzii), Tmp1)
+        .addImm(0)
+        .addImm(0)
+        .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_LO32);
+    BuildMI(DispContBB, DL, TII->get(VE::ANDrm), Tmp2)
+        .addReg(Tmp1, getKillRegState(true))
+        .addImm(M0(32));
+    BuildMI(DispContBB, DL, TII->get(VE::LEASLrii), BReg)
+        .addReg(Tmp2, getKillRegState(true))
+        .addImm(0)
+        .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_HI32);
+  }
+
+  switch (JTE) {
+  case MachineJumpTableInfo::EK_BlockAddress: {
+    // Generate simple block address code for no-PIC model.
+    //     sll %Tmp1, %IReg, 3
+    //     lds %TReg, 0(%Tmp1, %BReg)
+    //     bcfla %TReg
+
+    Register TReg = MRI.createVirtualRegister(RC);
+    Register Tmp1 = MRI.createVirtualRegister(RC);
+
+    BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1)
+        .addReg(IReg, getKillRegState(true))
+        .addImm(3);
+    BuildMI(DispContBB, DL, TII->get(VE::LDrri), TReg)
+        .addReg(BReg, getKillRegState(true))
+        .addReg(Tmp1, getKillRegState(true))
+        .addImm(0);
+    BuildMI(DispContBB, DL, TII->get(VE::BCFLari_t))
+        .addReg(TReg, getKillRegState(true))
+        .addImm(0);
+    break;
+  }
+  case MachineJumpTableInfo::EK_Custom32: {
+    // Generate block address code using differences from the function pointer
+    // for PIC model.
+    //     sll %Tmp1, %IReg, 2
+    //     ldl.zx %OReg, 0(%Tmp1, %BReg)
+    //     Prepare function address in BReg2.
+    //     adds.l %TReg, %BReg2, %OReg
+    //     bcfla %TReg
+
+    assert(isPositionIndependent());
+    Register OReg = MRI.createVirtualRegister(RC);
+    Register TReg = MRI.createVirtualRegister(RC);
+    Register Tmp1 = MRI.createVirtualRegister(RC);
+
+    BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1)
+        .addReg(IReg, getKillRegState(true))
+        .addImm(2);
+    BuildMI(DispContBB, DL, TII->get(VE::LDLZXrri), OReg)
+        .addReg(BReg, getKillRegState(true))
+        .addReg(Tmp1, getKillRegState(true))
+        .addImm(0);
+    Register BReg2 =
+        prepareSymbol(*DispContBB, DispContBB->end(),
+                      DispContBB->getParent()->getName(), DL, /* Local */ true);
+    BuildMI(DispContBB, DL, TII->get(VE::ADDSLrr), TReg)
+        .addReg(OReg, getKillRegState(true))
+        .addReg(BReg2, getKillRegState(true));
+    BuildMI(DispContBB, DL, TII->get(VE::BCFLari_t))
+        .addReg(TReg, getKillRegState(true))
+        .addImm(0);
+    break;
+  }
+  default:
+    llvm_unreachable("Unexpected jump table encoding");
+  }
+
+  // Add the jump table entries as successors to the MBB.
+  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
+  for (auto &LP : LPadList)
+    if (SeenMBBs.insert(LP).second)
+      DispContBB->addSuccessor(LP);
+
+  // N.B. the order the invoke BBs are processed in doesn't matter here.
+  SmallVector<MachineBasicBlock *, 64> MBBLPads;
+  const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
+  for (MachineBasicBlock *MBB : InvokeBBs) {
+    // Remove the landing pad successor from the invoke block and replace it
+    // with the new dispatch block.
+    // Keep a copy of Successors since it's modified inside the loop.
+    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
+                                                   MBB->succ_rend());
+    // FIXME: Avoid quadratic complexity.
+    for (auto MBBS : Successors) {
+      if (MBBS->isEHPad()) {
+        MBB->removeSuccessor(MBBS);
+        MBBLPads.push_back(MBBS);
+      }
+    }
+
+    MBB->addSuccessor(DispatchBB);
+
+    // Find the invoke call and mark all of the callee-saved registers as
+    // 'implicit defined' so that they're spilled.  This prevents code from
+    // moving instructions to before the EH block, where they will never be
+    // executed.
+    for (auto &II : reverse(*MBB)) {
+      if (!II.isCall())
+        continue;
+
+      DenseMap<Register, bool> DefRegs;
+      for (auto &MOp : II.operands())
+        if (MOp.isReg())
+          DefRegs[MOp.getReg()] = true;
+
+      MachineInstrBuilder MIB(*MF, &II);
+      for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
+        Register Reg = SavedRegs[RI];
+        if (!DefRegs[Reg])
+          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
+      }
+
+      break;
+    }
+  }
+
+  // Mark all former landing pads as non-landing pads.  The dispatch is the only
+  // landing pad now.
+  for (auto &LP : MBBLPads)
+    LP->setIsEHPad(false);
+
+  // The instruction is gone now.
+  MI.eraseFromParent();
+  return BB;
+}
+
+MachineBasicBlock *
+VETargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+                                              MachineBasicBlock *BB) const {
+  switch (MI.getOpcode()) {
+  default:
+    llvm_unreachable("Unknown Custom Instruction!");
+  case VE::EH_SjLj_LongJmp:
+    return emitEHSjLjLongJmp(MI, BB);
+  case VE::EH_SjLj_SetJmp:
+    return emitEHSjLjSetJmp(MI, BB);
+  case VE::EH_SjLj_Setup_Dispatch:
+    return emitSjLjDispatchBlock(MI, BB);
+  }
+}
+
 static bool isI32Insn(const SDNode *User, const SDNode *N) {
   switch (User->getOpcode()) {
   default:

diff  --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h
index eec4532ffa3a..f2055da0745e 100644
--- a/llvm/lib/Target/VE/VEISelLowering.h
+++ b/llvm/lib/Target/VE/VEISelLowering.h
@@ -24,19 +24,22 @@ namespace VEISD {
 enum NodeType : unsigned {
   FIRST_NUMBER = ISD::BUILTIN_OP_END,
 
-  CALL,            // A call instruction.
-  GETFUNPLT,       // Load function address through %plt insturction.
-  GETTLSADDR,      // Load address for TLS access.
-  GETSTACKTOP,     // Retrieve address of stack top (first address of
-                   // locals and temporaries).
-  GLOBAL_BASE_REG, // Global base reg for PIC.
-  Hi,              // Hi/Lo operations, typically on a global address.
-  Lo,              // Hi/Lo operations, typically on a global address.
-  MEMBARRIER,      // Compiler barrier only; generate a no-op.
-  RET_FLAG,        // Return with a flag operand.
-  TS1AM,           // A TS1AM instruction used for 1/2 bytes swap.
-  VEC_BROADCAST,   // A vector broadcast instruction.
-                   //   0: scalar value, 1: VL
+  CALL,                   // A call instruction.
+  EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
+  EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
+  EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.
+  GETFUNPLT,              // Load function address through %plt instruction.
+  GETTLSADDR,             // Load address for TLS access.
+  GETSTACKTOP,            // Retrieve address of stack top (first address of
+                          // locals and temporaries).
+  GLOBAL_BASE_REG,        // Global base reg for PIC.
+  Hi,                     // Hi/Lo operations, typically on a global address.
+  Lo,                     // Hi/Lo operations, typically on a global address.
+  MEMBARRIER,             // Compiler barrier only; generate a no-op.
+  RET_FLAG,               // Return with a flag operand.
+  TS1AM,                  // A TS1AM instruction used for 1/2 bytes swap.
+  VEC_BROADCAST,          // A vector broadcast instruction.
+                          //   0: scalar value, 1: VL
 
 // VVP_* nodes.
 #define ADD_VVP_OP(VVP_NAME, ...) VVP_NAME,
@@ -113,6 +116,9 @@ class VETargetLowering : public TargetLowering {
   SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
@@ -132,6 +138,29 @@ class VETargetLowering : public TargetLowering {
   void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                           SelectionDAG &DAG) const override;
 
+  /// Custom Inserter {
+  MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr &MI,
+                              MachineBasicBlock *MBB) const override;
+  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
+                                       MachineBasicBlock *MBB) const;
+  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
+                                      MachineBasicBlock *MBB) const;
+  MachineBasicBlock *emitSjLjDispatchBlock(MachineInstr &MI,
+                                           MachineBasicBlock *BB) const;
+
+  void setupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
+                              MachineBasicBlock *DispatchBB, int FI,
+                              int Offset) const;
+  // Setup basic block address.
+  Register prepareMBB(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+                      MachineBasicBlock *TargetBB, const DebugLoc &DL) const;
+  // Prepare function/variable address.
+  Register prepareSymbol(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+                         StringRef Symbol, const DebugLoc &DL, bool IsLocal,
+                         bool IsCall) const;
+  /// } Custom Inserter
+
   /// VVP Lowering {
   SDValue lowerToVVP(SDValue Op, SelectionDAG &DAG) const;
   /// } VVPLowering

diff  --git a/llvm/lib/Target/VE/VEInstrBuilder.h b/llvm/lib/Target/VE/VEInstrBuilder.h
new file mode 100644
index 000000000000..1b0e07546931
--- /dev/null
+++ b/llvm/lib/Target/VE/VEInstrBuilder.h
@@ -0,0 +1,41 @@
+//===-- VEInstrBuilder.h - Aides for building VE insts ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes functions that may be used with BuildMI from the
+// MachineInstrBuilder.h file to simplify generating frame and constant pool
+// references.
+//
+// For reference, the order of operands for memory references is:
+// (Operand), Dest Reg, Base Reg, and either Reg Index or Immediate
+// Displacement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_VE_VEINSTRBUILDER_H
+#define LLVM_LIB_TARGET_VE_VEINSTRBUILDER_H
+
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+namespace llvm {
+
+/// addFrameReference - This function is used to add a reference to the base of
+/// an abstract object on the stack frame of the current function.  This
+/// reference has base register as the FrameIndex offset until it is resolved.
+/// This allows a constant offset to be specified as well...
+///
+static inline const MachineInstrBuilder &
+addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0,
+                  bool ThreeOp = true) {
+  if (ThreeOp)
+    return MIB.addFrameIndex(FI).addImm(0).addImm(Offset);
+  return MIB.addFrameIndex(FI).addImm(Offset);
+}
+
+} // namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/VE/VEInstrInfo.td b/llvm/lib/Target/VE/VEInstrInfo.td
index debd00ff6f96..0e41473733c0 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.td
+++ b/llvm/lib/Target/VE/VEInstrInfo.td
@@ -446,6 +446,17 @@ def retflag       : SDNode<"VEISD::RET_FLAG", SDTNone,
 
 def getGOT        : Operand<iPTR>;
 
+def VEeh_sjlj_setjmp: SDNode<"VEISD::EH_SJLJ_SETJMP",
+                             SDTypeProfile<1, 1, [SDTCisInt<0>,
+                                                  SDTCisPtrTy<1>]>,
+                             [SDNPHasChain, SDNPSideEffect]>;
+def VEeh_sjlj_longjmp: SDNode<"VEISD::EH_SJLJ_LONGJMP",
+                              SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
+                              [SDNPHasChain, SDNPSideEffect]>;
+def VEeh_sjlj_setup_dispatch: SDNode<"VEISD::EH_SJLJ_SETUP_DISPATCH",
+                                     SDTypeProfile<0, 0, []>,
+                                     [SDNPHasChain, SDNPSideEffect]>;
+
 // GETFUNPLT for PIC
 def GetFunPLT : SDNode<"VEISD::GETFUNPLT", SDTIntUnaryOp>;
 
@@ -1878,6 +1889,33 @@ def : Pat<(i32 (atomic_swap_32 ADDRri:$src, i32:$new)),
 def : Pat<(i64 (atomic_swap_64 ADDRri:$src, i64:$new)),
           (TS1AMLrir MEMriRRM:$src, (LEAzii 0, 0, 255), i64:$new)>;
 
+//===----------------------------------------------------------------------===//
+// SJLJ Exception handling patterns
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
+    usesCustomInserter = 1 in {
+  let isTerminator = 1 in
+  def EH_SjLj_LongJmp : Pseudo<(outs), (ins I64:$buf),
+                               "# EH_SJLJ_LONGJMP",
+                               [(VEeh_sjlj_longjmp I64:$buf)]>;
+
+  def EH_SjLj_SetJmp  : Pseudo<(outs I32:$dst), (ins I64:$buf),
+                               "# EH_SJLJ_SETJMP",
+                               [(set I32:$dst, (VEeh_sjlj_setjmp I64:$buf))]>;
+
+  def EH_SjLj_Setup_Dispatch : Pseudo<(outs), (ins), "# EH_SJLJ_SETUP_DISPATCH",
+                                      [(VEeh_sjlj_setup_dispatch)]>;
+}
+
+let isTerminator = 1, isBranch = 1, isCodeGenOnly = 1 in
+  def EH_SjLj_Setup : Pseudo<(outs), (ins brtarget32:$dst),
+                             "# EH_SJlJ_SETUP $dst">;
+
+//===----------------------------------------------------------------------===//
+// Branch related patterns
+//===----------------------------------------------------------------------===//
+
 // Branches
 def : Pat<(br bb:$addr), (BRCFLa bb:$addr)>;
 

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll
new file mode 100644
index 000000000000..d803a5fe7f26
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll
@@ -0,0 +1,213 @@
+; RUN: llc < %s -mtriple=ve | FileCheck %s
+; RUN: llc < %s -mtriple=ve -relocation-model=pic | \
+; RUN:     FileCheck %s -check-prefix=PIC
+
+%struct.__jmp_buf_tag = type { [25 x i64], i64, [16 x i64] }
+
+@buf = common global [1 x %struct.__jmp_buf_tag] zeroinitializer, align 8
+
+; Function Attrs: noinline nounwind optnone
+define signext i32 @t_setjmp() {
+; CHECK-LABEL: t_setjmp:
+; CHECK:       .LBB{{[0-9]+}}_5:
+; CHECK-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s19, 56(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s20, 64(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s21, 72(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s22, 80(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s23, 88(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s24, 96(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s25, 104(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s26, 112(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s27, 120(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s28, 128(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s29, 136(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s30, 144(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s31, 152(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s32, 160(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s33, 168(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    lea %s0, buf@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, buf@hi(, %s0)
+; CHECK-NEXT:    st %s9, (, %s0)
+; CHECK-NEXT:    st %s11, 16(, %s0)
+; CHECK-NEXT:    lea %s1, .LBB{{[0-9]+}}_3@lo
+; CHECK-NEXT:    and %s1, %s1, (32)0
+; CHECK-NEXT:    lea.sl %s1, .LBB{{[0-9]+}}_3@hi(, %s1)
+; CHECK-NEXT:    st %s1, 8(, %s0)
+; CHECK-NEXT:    # EH_SJlJ_SETUP .LBB{{[0-9]+}}_3
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    lea %s0, 0
+; CHECK-NEXT:    br.l.t .LBB{{[0-9]+}}_2
+; CHECK-NEXT:  .LBB{{[0-9]+}}_3: # Block address taken
+; CHECK-NEXT:    lea %s0, 1
+; CHECK-NEXT:  .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    ld %s33, 168(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s32, 160(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s31, 152(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s30, 144(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s29, 136(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s28, 128(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s27, 120(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s26, 112(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s25, 104(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s24, 96(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s23, 88(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s22, 80(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s21, 72(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s20, 64(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    or %s11, 0, %s9
+;
+; PIC-LABEL: t_setjmp:
+; PIC:       # %bb.0:
+; PIC-NEXT:    st %s9, (, %s11)
+; PIC-NEXT:    st %s10, 8(, %s11)
+; PIC-NEXT:    st %s15, 24(, %s11)
+; PIC-NEXT:    st %s16, 32(, %s11)
+; PIC-NEXT:    or %s9, 0, %s11
+; PIC-NEXT:    lea %s11, -176(, %s11)
+; PIC-NEXT:    brge.l %s11, %s8, .LBB0_5
+; PIC-NEXT:  # %bb.4:
+; PIC-NEXT:    ld %s61, 24(, %s14)
+; PIC-NEXT:    or %s62, 0, %s0
+; PIC-NEXT:    lea %s63, 315
+; PIC-NEXT:    shm.l %s63, (%s61)
+; PIC-NEXT:    shm.l %s8, 8(%s61)
+; PIC-NEXT:    shm.l %s11, 16(%s61)
+; PIC-NEXT:    monc
+; PIC-NEXT:    or %s0, 0, %s62
+; PIC-NEXT:  .LBB0_5:
+; PIC-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s19, 56(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s20, 64(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s21, 72(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s22, 80(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s23, 88(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s24, 96(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s25, 104(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s26, 112(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s27, 120(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s28, 128(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s29, 136(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s30, 144(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s31, 152(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s32, 160(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s33, 168(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
+; PIC-NEXT:    and %s15, %s15, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
+; PIC-NEXT:    lea %s0, buf@got_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, buf@got_hi(, %s0)
+; PIC-NEXT:    ld %s0, (%s0, %s15)
+; PIC-NEXT:    st %s9, (, %s0)
+; PIC-NEXT:    st %s11, 16(, %s0)
+; PIC-NEXT:    lea %s1, .LBB0_3@gotoff_lo
+; PIC-NEXT:    and %s1, %s1, (32)0
+; PIC-NEXT:    lea.sl %s1, .LBB0_3@gotoff_hi(%s1, %s15)
+; PIC-NEXT:    st %s1, 8(, %s0)
+; PIC-NEXT:    # EH_SJlJ_SETUP .LBB0_3
+; PIC-NEXT:  # %bb.1:
+; PIC-NEXT:    lea %s0, 0
+; PIC-NEXT:    br.l.t .LBB0_2
+; PIC-NEXT:  .LBB0_3: # Block address taken
+; PIC-NEXT:    lea %s0, 1
+; PIC-NEXT:  .LBB0_2:
+; PIC-NEXT:    adds.w.sx %s0, %s0, (0)1
+; PIC-NEXT:    ld %s33, 168(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s32, 160(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s31, 152(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s30, 144(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s29, 136(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s28, 128(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s27, 120(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s26, 112(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s25, 104(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s24, 96(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s23, 88(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s22, 80(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s21, 72(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s20, 64(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    or %s11, 0, %s9
+; PIC-NEXT:    ld %s16, 32(, %s11)
+; PIC-NEXT:    ld %s15, 24(, %s11)
+; PIC-NEXT:    ld %s10, 8(, %s11)
+; PIC-NEXT:    ld %s9, (, %s11)
+; PIC-NEXT:    b.l.t (, %s10)
+  %1 = call i8* @llvm.frameaddress(i32 0)
+  store i8* %1, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8**), align 8
+  %2 = call i8* @llvm.stacksave()
+  store i8* %2, i8** getelementptr inbounds (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8**), i64 2), align 8
+  %3 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8*))
+  ret i32 %3
+}
+
+; Function Attrs: nounwind readnone
+declare i8* @llvm.frameaddress(i32)
+
+; Function Attrs: nounwind
+declare i8* @llvm.stacksave()
+
+; Function Attrs: nounwind
+declare i32 @llvm.eh.sjlj.setjmp(i8*)
+
+; Function Attrs: noinline nounwind optnone
+define void @t_longjmp() {
+; CHECK-LABEL: t_longjmp:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, buf@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, buf@hi(, %s0)
+; CHECK-NEXT:    ld %s9, (, %s0)
+; CHECK-NEXT:    ld %s1, 8(, %s0)
+; CHECK-NEXT:    or %s10, 0, %s0
+; CHECK-NEXT:    ld %s11, 16(, %s0)
+; CHECK-NEXT:    b.l.t (, %s1)
+;
+; PIC-LABEL: t_longjmp:
+; PIC:       # %bb.0:
+; PIC-NEXT:    st %s9, (, %s11)
+; PIC-NEXT:    st %s10, 8(, %s11)
+; PIC-NEXT:    st %s15, 24(, %s11)
+; PIC-NEXT:    st %s16, 32(, %s11)
+; PIC-NEXT:    or %s9, 0, %s11
+; PIC-NEXT:    lea %s11, -176(, %s11)
+; PIC-NEXT:    brge.l.t %s11, %s8, .LBB1_2
+; PIC-NEXT:  # %bb.1:
+; PIC-NEXT:    ld %s61, 24(, %s14)
+; PIC-NEXT:    or %s62, 0, %s0
+; PIC-NEXT:    lea %s63, 315
+; PIC-NEXT:    shm.l %s63, (%s61)
+; PIC-NEXT:    shm.l %s8, 8(%s61)
+; PIC-NEXT:    shm.l %s11, 16(%s61)
+; PIC-NEXT:    monc
+; PIC-NEXT:    or %s0, 0, %s62
+; PIC-NEXT:  .LBB1_2:
+; PIC-NEXT:    lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
+; PIC-NEXT:    and %s15, %s15, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
+; PIC-NEXT:    lea %s0, buf@got_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, buf@got_hi(, %s0)
+; PIC-NEXT:    ld %s0, (%s0, %s15)
+; PIC-NEXT:    ld %s9, (, %s0)
+; PIC-NEXT:    ld %s1, 8(, %s0)
+; PIC-NEXT:    or %s10, 0, %s0
+; PIC-NEXT:    ld %s11, 16(, %s0)
+; PIC-NEXT:    b.l.t (, %s1)
+  call void @llvm.eh.sjlj.longjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @buf to i8*))
+  unreachable
+                                                  ; No predecessors!
+  ret void
+}
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.eh.sjlj.longjmp(i8*)
+

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll
new file mode 100644
index 000000000000..0e5ca3dba6a9
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll
@@ -0,0 +1,87 @@
+; RUN: llc < %s -mtriple=ve | FileCheck %s
+
+%Foo = type { [125 x i8] }
+declare void @whatever(i64, %Foo*, i8**, i8*, i8*, i32)  #0
+declare i32 @llvm.eh.sjlj.setjmp(i8*) nounwind
+
+; Function Attrs: noinline nounwind optnone
+define i32 @t_setjmp(i64 %n, %Foo* byval(%Foo) nocapture readnone align 8 %f) {
+; CHECK-LABEL: t_setjmp:
+; CHECK:       .LBB{{[0-9]+}}_5:
+; CHECK-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s19, 56(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s20, 64(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s21, 72(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s22, 80(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s23, 88(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s24, 96(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s25, 104(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s26, 112(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s27, 120(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s28, 128(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s29, 136(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s30, 144(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s31, 152(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s32, 160(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s33, 168(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s1, 312(, %s17) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s0, 304(, %s17) # 8-byte Folded Spill
+; CHECK-NEXT:    lea %s0, 15(, %s0)
+; CHECK-NEXT:    and %s0, -16, %s0
+; CHECK-NEXT:    lea %s1, __ve_grow_stack@lo
+; CHECK-NEXT:    and %s1, %s1, (32)0
+; CHECK-NEXT:    lea.sl %s12, __ve_grow_stack@hi(, %s1)
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:    lea %s1, 240(, %s11)
+; CHECK-NEXT:    st %s1, 328(, %s17)
+; CHECK-NEXT:    lea %s0, .LBB{{[0-9]+}}_3@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, .LBB{{[0-9]+}}_3@hi(, %s0)
+; CHECK-NEXT:    st %s17, 24(, %s1)
+; CHECK-NEXT:    st %s1, 296(, %s17) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s0, 8(, %s1)
+; CHECK-NEXT:    # EH_SJlJ_SETUP .LBB{{[0-9]+}}_3
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    lea %s5, 0
+; CHECK-NEXT:    br.l.t .LBB{{[0-9]+}}_2
+; CHECK-NEXT:  .LBB{{[0-9]+}}_3: # Block address taken
+; CHECK-NEXT:    ld %s17, 24(, %s10)
+; CHECK-NEXT:    lea %s5, 1
+; CHECK-NEXT:  .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, whatever@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s12, whatever@hi(, %s0)
+; CHECK-NEXT:    lea %s2, 328(, %s17)
+; CHECK-NEXT:    lea %s3, 320(, %s17)
+; CHECK-NEXT:    ld %s0, 304(, %s17) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s1, 312(, %s17) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s4, 296(, %s17) # 8-byte Folded Reload
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    ld %s33, 168(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s32, 160(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s31, 152(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s30, 144(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s29, 136(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s28, 128(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s27, 120(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s26, 112(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s25, 104(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s24, 96(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s23, 88(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s22, 80(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s21, 72(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s20, 64(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    or %s11, 0, %s9
+  %buf = alloca [5 x i8*], align 16
+  %p = alloca i8*, align 8
+  %q = alloca i8, align 64
+  %r = bitcast [5 x i8*]* %buf to i8*
+  %s = alloca i8, i64 %n, align 1
+  store i8* %s, i8** %p, align 8
+  %t = call i32 @llvm.eh.sjlj.setjmp(i8* %s)
+  call void @whatever(i64 %n, %Foo* %f, i8** %p, i8* %q, i8* %s, i32 %t) #1
+  ret i32 0
+}

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll
new file mode 100644
index 000000000000..19ff37920bc0
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll
@@ -0,0 +1,282 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=ve -exception-model sjlj | FileCheck %s
+; RUN: llc < %s -mtriple=ve -exception-model sjlj -relocation-model pic | \
+; RUN:     FileCheck %s -check-prefix=PIC
+
+; Function Attrs: noinline nounwind optnone
+define void @test_callsite() personality i32 (...)* @__gxx_personality_sj0 {
+; CHECK-LABEL: test_callsite:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    st %s9, (, %s11)
+; CHECK-NEXT:    st %s10, 8(, %s11)
+; CHECK-NEXT:    or %s9, 0, %s11
+; CHECK-NEXT:    lea %s11, -432(, %s11)
+; CHECK-NEXT:    brge.l %s11, %s8, .LBB0_7
+; CHECK-NEXT:  # %bb.6:
+; CHECK-NEXT:    ld %s61, 24(, %s14)
+; CHECK-NEXT:    or %s62, 0, %s0
+; CHECK-NEXT:    lea %s63, 315
+; CHECK-NEXT:    shm.l %s63, (%s61)
+; CHECK-NEXT:    shm.l %s8, 8(%s61)
+; CHECK-NEXT:    shm.l %s11, 16(%s61)
+; CHECK-NEXT:    monc
+; CHECK-NEXT:    or %s0, 0, %s62
+; CHECK-NEXT:  .LBB0_7:
+; CHECK-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s19, 56(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s20, 64(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s21, 72(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s22, 80(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s23, 88(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s24, 96(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s25, 104(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s26, 112(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s27, 120(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s28, 128(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s29, 136(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s30, 144(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s31, 152(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s32, 160(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s33, 168(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    lea %s0, __gxx_personality_sj0@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, __gxx_personality_sj0@hi(, %s0)
+; CHECK-NEXT:    st %s0, -56(, %s9)
+; CHECK-NEXT:    lea %s0, GCC_except_table0@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, GCC_except_table0@hi(, %s0)
+; CHECK-NEXT:    st %s0, -48(, %s9)
+; CHECK-NEXT:    st %s9, -40(, %s9)
+; CHECK-NEXT:    st %s11, -24(, %s9)
+; CHECK-NEXT:    lea %s0, .LBB0_3@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, .LBB0_3@hi(, %s0)
+; CHECK-NEXT:    st %s0, -32(, %s9)
+; CHECK-NEXT:    or %s0, 1, (0)1
+; CHECK-NEXT:    st %s0, -96(, %s9)
+; CHECK-NEXT:    lea %s0, _Unwind_SjLj_Register@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s12, _Unwind_SjLj_Register@hi(, %s0)
+; CHECK-NEXT:    lea %s0, -104(, %s9)
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:  .Ltmp0:
+; CHECK-NEXT:    lea %s0, f@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s12, f@hi(, %s0)
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:  .Ltmp1:
+; CHECK-NEXT:  .LBB0_2: # %try.cont
+; CHECK-NEXT:    or %s0, -1, (0)1
+; CHECK-NEXT:    st %s0, -96(, %s9)
+; CHECK-NEXT:    lea %s0, _Unwind_SjLj_Unregister@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s18, _Unwind_SjLj_Unregister@hi(, %s0)
+; CHECK-NEXT:    lea %s0, -192(, %s9)
+; CHECK-NEXT:    or %s12, 0, %s18
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:    lea %s0, -104(, %s9)
+; CHECK-NEXT:    or %s12, 0, %s18
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:    ld %s33, 168(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s32, 160(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s31, 152(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s30, 144(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s29, 136(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s28, 128(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s27, 120(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s26, 112(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s25, 104(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s24, 96(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s23, 88(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s22, 80(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s21, 72(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s20, 64(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK-NEXT:    ld %s10, 8(, %s11)
+; CHECK-NEXT:    ld %s9, (, %s11)
+; CHECK-NEXT:    b.l.t (, %s10)
+; CHECK-NEXT:  .LBB0_3:
+; CHECK-NEXT:    ldl.zx %s0, -96(, %s9)
+; CHECK-NEXT:    brgt.l 1, %s0, .LBB0_4
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    lea %s0, abort@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, abort@hi(, %s0)
+; CHECK-NEXT:    bsic %s10, (, %s0)
+; CHECK-NEXT:  .LBB0_4:
+; CHECK-NEXT:    lea %s1, .LJTI0_0@lo
+; CHECK-NEXT:    and %s1, %s1, (32)0
+; CHECK-NEXT:    lea.sl %s1, .LJTI0_0@hi(, %s1)
+; CHECK-NEXT:    sll %s0, %s0, 3
+; CHECK-NEXT:    ld %s0, (%s0, %s1)
+; CHECK-NEXT:    b.l.t (, %s0)
+; CHECK-NEXT:  .LBB0_1: # %lpad
+; CHECK-NEXT:  .Ltmp2:
+; CHECK-NEXT:    ld %s0, -88(, %s9)
+; CHECK-NEXT:    ld %s0, -80(, %s9)
+; CHECK-NEXT:    br.l.t .LBB0_2
+;
+; PIC-LABEL: test_callsite:
+; PIC:       # %bb.0:
+; PIC-NEXT:    st %s9, (, %s11)
+; PIC-NEXT:    st %s10, 8(, %s11)
+; PIC-NEXT:    st %s15, 24(, %s11)
+; PIC-NEXT:    st %s16, 32(, %s11)
+; PIC-NEXT:    or %s9, 0, %s11
+; PIC-NEXT:    lea %s11, -432(, %s11)
+; PIC-NEXT:    brge.l %s11, %s8, .LBB0_7
+; PIC-NEXT:  # %bb.6:
+; PIC-NEXT:    ld %s61, 24(, %s14)
+; PIC-NEXT:    or %s62, 0, %s0
+; PIC-NEXT:    lea %s63, 315
+; PIC-NEXT:    shm.l %s63, (%s61)
+; PIC-NEXT:    shm.l %s8, 8(%s61)
+; PIC-NEXT:    shm.l %s11, 16(%s61)
+; PIC-NEXT:    monc
+; PIC-NEXT:    or %s0, 0, %s62
+; PIC-NEXT:  .LBB0_7:
+; PIC-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s19, 56(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s20, 64(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s21, 72(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s22, 80(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s23, 88(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s24, 96(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s25, 104(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s26, 112(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s27, 120(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s28, 128(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s29, 136(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s30, 144(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s31, 152(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s32, 160(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s33, 168(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
+; PIC-NEXT:    and %s15, %s15, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
+; PIC-NEXT:    lea %s0, __gxx_personality_sj0@got_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, __gxx_personality_sj0@got_hi(, %s0)
+; PIC-NEXT:    ld %s0, (%s0, %s15)
+; PIC-NEXT:    st %s0, -56(, %s9)
+; PIC-NEXT:    lea %s0, GCC_except_table0@gotoff_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, GCC_except_table0@gotoff_hi(%s0, %s15)
+; PIC-NEXT:    st %s0, -48(, %s9)
+; PIC-NEXT:    st %s9, -40(, %s9)
+; PIC-NEXT:    st %s11, -24(, %s9)
+; PIC-NEXT:    lea %s0, .LBB0_3@gotoff_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, .LBB0_3@gotoff_hi(%s0, %s15)
+; PIC-NEXT:    st %s0, -32(, %s9)
+; PIC-NEXT:    or %s0, 1, (0)1
+; PIC-NEXT:    st %s0, -96(, %s9)
+; PIC-NEXT:    lea %s12, _Unwind_SjLj_Register@plt_lo(-24)
+; PIC-NEXT:    and %s12, %s12, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s12, _Unwind_SjLj_Register@plt_hi(%s16, %s12)
+; PIC-NEXT:    lea %s0, -104(, %s9)
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:  .Ltmp0:
+; PIC-NEXT:    lea %s12, f@plt_lo(-24)
+; PIC-NEXT:    and %s12, %s12, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s12, f@plt_hi(%s16, %s12)
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:  .Ltmp1:
+; PIC-NEXT:  .LBB0_2: # %try.cont
+; PIC-NEXT:    or %s0, -1, (0)1
+; PIC-NEXT:    st %s0, -96(, %s9)
+; PIC-NEXT:    lea %s18, _Unwind_SjLj_Unregister@plt_lo(-24)
+; PIC-NEXT:    and %s18, %s18, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s18, _Unwind_SjLj_Unregister@plt_hi(%s16, %s18)
+; PIC-NEXT:    lea %s0, -192(, %s9)
+; PIC-NEXT:    or %s12, 0, %s18
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:    lea %s0, -104(, %s9)
+; PIC-NEXT:    or %s12, 0, %s18
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:    ld %s33, 168(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s32, 160(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s31, 152(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s30, 144(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s29, 136(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s28, 128(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s27, 120(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s26, 112(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s25, 104(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s24, 96(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s23, 88(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s22, 80(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s21, 72(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s20, 64(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    or %s11, 0, %s9
+; PIC-NEXT:    ld %s16, 32(, %s11)
+; PIC-NEXT:    ld %s15, 24(, %s11)
+; PIC-NEXT:    ld %s10, 8(, %s11)
+; PIC-NEXT:    ld %s9, (, %s11)
+; PIC-NEXT:    b.l.t (, %s10)
+; PIC-NEXT:  .LBB0_3:
+; PIC-NEXT:    ldl.zx %s0, -96(, %s9)
+; PIC-NEXT:    lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
+; PIC-NEXT:    and %s15, %s15, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
+; PIC-NEXT:    brgt.l 1, %s0, .LBB0_4
+; PIC-NEXT:  # %bb.5:
+; PIC-NEXT:    lea %s0, abort@plt_lo(-24)
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s0, abort@plt_hi(%s16, %s0)
+; PIC-NEXT:    bsic %s10, (, %s0)
+; PIC-NEXT:  .LBB0_4:
+; PIC-NEXT:    lea %s1, .LJTI0_0@gotoff_lo
+; PIC-NEXT:    and %s1, %s1, (32)0
+; PIC-NEXT:    lea.sl %s1, .LJTI0_0@gotoff_hi(%s1, %s15)
+; PIC-NEXT:    sll %s0, %s0, 2
+; PIC-NEXT:    ldl.zx %s0, (%s0, %s1)
+; PIC-NEXT:    lea %s1, test_callsite@gotoff_lo
+; PIC-NEXT:    and %s1, %s1, (32)0
+; PIC-NEXT:    lea.sl %s1, test_callsite@gotoff_hi(%s1, %s15)
+; PIC-NEXT:    adds.l %s0, %s0, %s1
+; PIC-NEXT:    b.l.t (, %s0)
+; PIC-NEXT:  .LBB0_1: # %lpad
+; PIC-NEXT:  .Ltmp2:
+; PIC-NEXT:    ld %s0, -88(, %s9)
+; PIC-NEXT:    ld %s0, -80(, %s9)
+; PIC-NEXT:    br.l.t .LBB0_2
+  %fn_context = alloca { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }, align 4
+  call void @llvm.eh.sjlj.callsite(i32 0)
+  invoke void @f()
+          to label %try.cont unwind label %lpad
+
+lpad:                                             ; preds = %entry
+  %1 = landingpad { i8*, i32 }
+          cleanup
+;;  %__data = getelementptr { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }, { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }* %fn_context, i32 0, i32 2
+;;  %exception_gep = getelementptr [4 x i32], [4 x i32]* %__data, i32 0, i32 0
+;;  %exn_val = load volatile i32, i32* %exception_gep, align 4
+;;  %2 = inttoptr i32 %exn_val to i8*
+;;  %exn_selector_gep = getelementptr [4 x i32], [4 x i32]* %__data, i32 0, i32 1
+;;  %exn_selector_val = load volatile i32, i32* %exn_selector_gep, align 4
+  br label %try.cont
+
+try.cont:                                         ; preds = %lpad, %entry
+  call void @_Unwind_SjLj_Unregister({ i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }* %fn_context)
+  ret void
+}
+
+declare void @f()
+
+declare i32 @__gxx_personality_sj0(...)
+
+declare void @_Unwind_SjLj_Unregister({ i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }*)
+
+; Function Attrs: nounwind readnone
+declare void @llvm.eh.sjlj.callsite(i32)

diff  --git a/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll
new file mode 100644
index 000000000000..6abb9bc67699
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll
@@ -0,0 +1,303 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=ve -exception-model=sjlj < %s | FileCheck %s
+; RUN: llc -mtriple=ve -exception-model=sjlj -relocation-model=pic < %s | \
+; RUN:     FileCheck --check-prefix=PIC %s
+
+ at SomeGlobal = external dso_local global i8
+
+define dso_local i32 @foo(i32 %arg) local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    st %s9, (, %s11)
+; CHECK-NEXT:    st %s10, 8(, %s11)
+; CHECK-NEXT:    or %s9, 0, %s11
+; CHECK-NEXT:    lea %s11, -352(, %s11)
+; CHECK-NEXT:    brge.l %s11, %s8, .LBB0_8
+; CHECK-NEXT:  # %bb.7: # %entry
+; CHECK-NEXT:    ld %s61, 24(, %s14)
+; CHECK-NEXT:    or %s62, 0, %s0
+; CHECK-NEXT:    lea %s63, 315
+; CHECK-NEXT:    shm.l %s63, (%s61)
+; CHECK-NEXT:    shm.l %s8, 8(%s61)
+; CHECK-NEXT:    shm.l %s11, 16(%s61)
+; CHECK-NEXT:    monc
+; CHECK-NEXT:    or %s0, 0, %s62
+; CHECK-NEXT:  .LBB0_8: # %entry
+; CHECK-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s19, 56(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s20, 64(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s21, 72(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s22, 80(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s23, 88(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s24, 96(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s25, 104(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s26, 112(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s27, 120(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s28, 128(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s29, 136(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s30, 144(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s31, 152(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s32, 160(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    st %s33, 168(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT:    lea %s0, __gxx_personality_sj0 at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, __gxx_personality_sj0 at hi(, %s0)
+; CHECK-NEXT:    st %s0, -56(, %s9)
+; CHECK-NEXT:    lea %s0, GCC_except_table0 at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, GCC_except_table0 at hi(, %s0)
+; CHECK-NEXT:    st %s0, -48(, %s9)
+; CHECK-NEXT:    st %s9, -40(, %s9)
+; CHECK-NEXT:    st %s11, -24(, %s9)
+; CHECK-NEXT:    lea %s0, .LBB0_3 at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, .LBB0_3 at hi(, %s0)
+; CHECK-NEXT:    st %s0, -32(, %s9)
+; CHECK-NEXT:    or %s0, 1, (0)1
+; CHECK-NEXT:    st %s0, -96(, %s9)
+; CHECK-NEXT:    lea %s0, _Unwind_SjLj_Register at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s12, _Unwind_SjLj_Register at hi(, %s0)
+; CHECK-NEXT:    lea %s0, -104(, %s9)
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:  .Ltmp0:
+; CHECK-NEXT:    lea %s0, errorbar at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s12, errorbar at hi(, %s0)
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:  .Ltmp1:
+; CHECK-NEXT:  # %bb.1: # %exit
+; CHECK-NEXT:    lea %s0, _Unwind_SjLj_Unregister at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s12, _Unwind_SjLj_Unregister at hi(, %s0)
+; CHECK-NEXT:    lea %s0, -104(, %s9)
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:  .LBB0_2: # %exit
+; CHECK-NEXT:    ld %s33, 168(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s32, 160(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s31, 152(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s30, 144(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s29, 136(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s28, 128(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s27, 120(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s26, 112(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s25, 104(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s24, 96(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s23, 88(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s22, 80(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s21, 72(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s20, 64(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK-NEXT:    ld %s10, 8(, %s11)
+; CHECK-NEXT:    ld %s9, (, %s11)
+; CHECK-NEXT:    b.l.t (, %s10)
+; CHECK-NEXT:  .LBB0_3:
+; CHECK-NEXT:    ldl.zx %s0, -96(, %s9)
+; CHECK-NEXT:    brgt.l 1, %s0, .LBB0_4
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    lea %s0, abort at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, abort at hi(, %s0)
+; CHECK-NEXT:    bsic %s10, (, %s0)
+; CHECK-NEXT:  .LBB0_4:
+; CHECK-NEXT:    lea %s1, .LJTI0_0 at lo
+; CHECK-NEXT:    and %s1, %s1, (32)0
+; CHECK-NEXT:    lea.sl %s1, .LJTI0_0 at hi(, %s1)
+; CHECK-NEXT:    sll %s0, %s0, 3
+; CHECK-NEXT:    ld %s0, (%s0, %s1)
+; CHECK-NEXT:    b.l.t (, %s0)
+; CHECK-NEXT:  .LBB0_6: # %handle
+; CHECK-NEXT:  .Ltmp2:
+; CHECK-NEXT:    ld %s0, -88(, %s9)
+; CHECK-NEXT:    ld %s0, -80(, %s9)
+; CHECK-NEXT:    lea %s0, _Unwind_SjLj_Unregister at lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s12, _Unwind_SjLj_Unregister at hi(, %s0)
+; CHECK-NEXT:    lea %s0, -104(, %s9)
+; CHECK-NEXT:    bsic %s10, (, %s12)
+; CHECK-NEXT:    or %s0, 1, (0)1
+; CHECK-NEXT:    br.l.t .LBB0_2
+;
+; PIC-LABEL: foo:
+; PIC:       # %bb.0: # %entry
+; PIC-NEXT:    st %s9, (, %s11)
+; PIC-NEXT:    st %s10, 8(, %s11)
+; PIC-NEXT:    st %s15, 24(, %s11)
+; PIC-NEXT:    st %s16, 32(, %s11)
+; PIC-NEXT:    or %s9, 0, %s11
+; PIC-NEXT:    lea %s11, -352(, %s11)
+; PIC-NEXT:    brge.l %s11, %s8, .LBB0_8
+; PIC-NEXT:  # %bb.7: # %entry
+; PIC-NEXT:    ld %s61, 24(, %s14)
+; PIC-NEXT:    or %s62, 0, %s0
+; PIC-NEXT:    lea %s63, 315
+; PIC-NEXT:    shm.l %s63, (%s61)
+; PIC-NEXT:    shm.l %s8, 8(%s61)
+; PIC-NEXT:    shm.l %s11, 16(%s61)
+; PIC-NEXT:    monc
+; PIC-NEXT:    or %s0, 0, %s62
+; PIC-NEXT:  .LBB0_8: # %entry
+; PIC-NEXT:    st %s18, 48(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s19, 56(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s20, 64(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s21, 72(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s22, 80(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s23, 88(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s24, 96(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s25, 104(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s26, 112(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s27, 120(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s28, 128(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s29, 136(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s30, 144(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s31, 152(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s32, 160(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    st %s33, 168(, %s9) # 8-byte Folded Spill
+; PIC-NEXT:    lea %s15, _GLOBAL_OFFSET_TABLE_ at pc_lo(-24)
+; PIC-NEXT:    and %s15, %s15, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s15, _GLOBAL_OFFSET_TABLE_ at pc_hi(%s16, %s15)
+; PIC-NEXT:    lea %s0, __gxx_personality_sj0 at got_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, __gxx_personality_sj0 at got_hi(, %s0)
+; PIC-NEXT:    ld %s0, (%s0, %s15)
+; PIC-NEXT:    st %s0, -56(, %s9)
+; PIC-NEXT:    lea %s0, GCC_except_table0 at gotoff_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, GCC_except_table0 at gotoff_hi(%s0, %s15)
+; PIC-NEXT:    st %s0, -48(, %s9)
+; PIC-NEXT:    st %s9, -40(, %s9)
+; PIC-NEXT:    st %s11, -24(, %s9)
+; PIC-NEXT:    lea %s0, .LBB0_3 at gotoff_lo
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    lea.sl %s0, .LBB0_3 at gotoff_hi(%s0, %s15)
+; PIC-NEXT:    st %s0, -32(, %s9)
+; PIC-NEXT:    or %s0, 1, (0)1
+; PIC-NEXT:    st %s0, -96(, %s9)
+; PIC-NEXT:    lea %s12, _Unwind_SjLj_Register at plt_lo(-24)
+; PIC-NEXT:    and %s12, %s12, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s12, _Unwind_SjLj_Register at plt_hi(%s16, %s12)
+; PIC-NEXT:    lea %s0, -104(, %s9)
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:  .Ltmp0:
+; PIC-NEXT:    lea %s12, errorbar at plt_lo(-24)
+; PIC-NEXT:    and %s12, %s12, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s12, errorbar at plt_hi(%s16, %s12)
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:  .Ltmp1:
+; PIC-NEXT:  # %bb.1: # %exit
+; PIC-NEXT:    lea %s12, _Unwind_SjLj_Unregister at plt_lo(-24)
+; PIC-NEXT:    and %s12, %s12, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s12, _Unwind_SjLj_Unregister at plt_hi(%s16, %s12)
+; PIC-NEXT:    lea %s0, -104(, %s9)
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:    or %s0, 0, (0)1
+; PIC-NEXT:  .LBB0_2: # %exit
+; PIC-NEXT:    ld %s33, 168(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s32, 160(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s31, 152(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s30, 144(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s29, 136(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s28, 128(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s27, 120(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s26, 112(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s25, 104(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s24, 96(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s23, 88(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s22, 80(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s21, 72(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s20, 64(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s19, 56(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    ld %s18, 48(, %s9) # 8-byte Folded Reload
+; PIC-NEXT:    or %s11, 0, %s9
+; PIC-NEXT:    ld %s16, 32(, %s11)
+; PIC-NEXT:    ld %s15, 24(, %s11)
+; PIC-NEXT:    ld %s10, 8(, %s11)
+; PIC-NEXT:    ld %s9, (, %s11)
+; PIC-NEXT:    b.l.t (, %s10)
+; PIC-NEXT:  .LBB0_3:
+; PIC-NEXT:    ldl.zx %s0, -96(, %s9)
+; PIC-NEXT:    lea %s15, _GLOBAL_OFFSET_TABLE_ at pc_lo(-24)
+; PIC-NEXT:    and %s15, %s15, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s15, _GLOBAL_OFFSET_TABLE_ at pc_hi(%s16, %s15)
+; PIC-NEXT:    brgt.l 1, %s0, .LBB0_4
+; PIC-NEXT:  # %bb.5:
+; PIC-NEXT:    lea %s0, abort at plt_lo(-24)
+; PIC-NEXT:    and %s0, %s0, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s0, abort at plt_hi(%s16, %s0)
+; PIC-NEXT:    bsic %s10, (, %s0)
+; PIC-NEXT:  .LBB0_4:
+; PIC-NEXT:    lea %s1, .LJTI0_0 at gotoff_lo
+; PIC-NEXT:    and %s1, %s1, (32)0
+; PIC-NEXT:    lea.sl %s1, .LJTI0_0 at gotoff_hi(%s1, %s15)
+; PIC-NEXT:    sll %s0, %s0, 2
+; PIC-NEXT:    ldl.zx %s0, (%s0, %s1)
+; PIC-NEXT:    lea %s1, foo at gotoff_lo
+; PIC-NEXT:    and %s1, %s1, (32)0
+; PIC-NEXT:    lea.sl %s1, foo at gotoff_hi(%s1, %s15)
+; PIC-NEXT:    adds.l %s0, %s0, %s1
+; PIC-NEXT:    b.l.t (, %s0)
+; PIC-NEXT:  .LBB0_6: # %handle
+; PIC-NEXT:  .Ltmp2:
+; PIC-NEXT:    ld %s0, -88(, %s9)
+; PIC-NEXT:    ld %s0, -80(, %s9)
+; PIC-NEXT:    lea %s12, _Unwind_SjLj_Unregister at plt_lo(-24)
+; PIC-NEXT:    and %s12, %s12, (32)0
+; PIC-NEXT:    sic %s16
+; PIC-NEXT:    lea.sl %s12, _Unwind_SjLj_Unregister at plt_hi(%s16, %s12)
+; PIC-NEXT:    lea %s0, -104(, %s9)
+; PIC-NEXT:    bsic %s10, (, %s12)
+; PIC-NEXT:    or %s0, 1, (0)1
+; PIC-NEXT:    br.l.t .LBB0_2
+; PIC-NEXT:  .Lfunc_end0:
+; PIC-NEXT:    .size	foo, .Lfunc_end0-foo
+; PIC-NEXT:    .section	.rodata,"a", at progbits
+; PIC-NEXT:    .p2align	2
+; PIC-NEXT:  .LJTI0_0:
+; PIC-NEXT:    .4byte	.LBB0_6-foo
+; PIC-NEXT:    .section	.gcc_except_table,"a", at progbits
+; PIC-NEXT:    .p2align	2
+; PIC-NEXT:  GCC_except_table0:
+; PIC-NEXT:  .Lexception0:
+; PIC-NEXT:    .byte	255 # @LPStart Encoding = omit
+; PIC-NEXT:    .byte	0 # @TType Encoding = absptr
+; PIC-NEXT:    .uleb128 .Lttbase0-.Lttbaseref0
+; PIC-NEXT:  .Lttbaseref0:
+; PIC-NEXT:    .byte	3 # Call site Encoding = udata4
+; PIC-NEXT:    .uleb128 .Lcst_end0-.Lcst_begin0
+; PIC-NEXT:  .Lcst_begin0:
+; PIC-NEXT:    .byte	0 # >> Call Site 0 <<
+; PIC-NEXT:        #   On exception at call site 0
+; PIC-NEXT:    .byte	1 #   Action: 1
+; PIC-NEXT:  .Lcst_end0:
+; PIC-NEXT:    .byte	1 # >> Action Record 1 <<
+; PIC-NEXT:        #   Catch TypeInfo 1
+; PIC-NEXT:    .byte	0 #   No further actions
+; PIC-NEXT:    .p2align	2
+; PIC-NEXT:        # >> Catch TypeInfos <<
+; PIC-NEXT:    .8byte	SomeGlobal                      # TypeInfo 1
+; PIC-NEXT:  .Lttbase0:
+; PIC-NEXT:    .p2align	2
+; PIC-NEXT:        # -- End function
+entry:
+  invoke void @errorbar() to label %exit unwind label %handle
+
+handle:
+  %error = landingpad { i8*, i32 } catch i8* @SomeGlobal
+  ret i32 1
+
+exit:
+  ret i32 0
+}
+
+declare dso_local void @errorbar() local_unnamed_addr
+
+declare dso_local i32 @__gxx_personality_sj0(...)

diff  --git a/llvm/test/CodeGen/VE/Scalar/sjlj_except.ll b/llvm/test/CodeGen/VE/Scalar/sjlj_except.ll
deleted file mode 100644
index 4d2558571bf4..000000000000
--- a/llvm/test/CodeGen/VE/Scalar/sjlj_except.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc  -mtriple=x86_64-unknown-unknown --exception-model=sjlj --print-after=sjljehprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-X86 %s
-; RUN: (llc  -mtriple=ve-unknown-unknown --exception-model=sjlj  --print-after=sjljehprepare < %s || true) 2>&1 | FileCheck --check-prefix=CHECK-VE %s
-
- at SomeGlobal = external dso_local global i8
-
-define dso_local i32 @foo(i32 %arg) local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
-; CHECK-VE: *** IR Dump After SJLJ Exception Handling preparation ***
-; CHECK-VE-NEXT: define dso_local i32 @foo(i32 %arg) local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
-; CHECK-VE-NEXT: entry:
-; CHECK-VE-NEXT:   %fn_context = alloca { i8*, i64, [4 x i64], i8*, i8*, [5 x i8*] }, align 8
-; CHECK-VE-NEXT:   %arg.tmp = select i1 true, i32 %arg, i32 undef
-; CHECK-VE-NEXT:   %pers_fn_gep = getelementptr { i8*, i64, [4 x i64], i8*, i8*, [5 x i8*] }, { i8*, i64, [4 x i64], i8*, i8*, [5 x i8*] }* %fn_context, i32 0, i32 3
-; CHECK-X86: *** IR Dump After SJLJ Exception Handling preparation ***
-; CHECK-X86-NEXT: define dso_local i32 @foo(i32 %arg) local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
-; CHECK-X86-NEXT: entry:
-; CHECK-X86-NEXT:   %fn_context = alloca { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }, align 8
-; CHECK-X86-NEXT:   %arg.tmp = select i1 true, i32 %arg, i32 undef
-; CHECK-X86-NEXT:   %pers_fn_gep = getelementptr { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }, { i8*, i32, [4 x i32], i8*, i8*, [5 x i8*] }* %fn_context, i32 0, i32 3
-entry:
-  invoke void @errorbar() to label %exit unwind label %handle
-
-handle:
-  %error = landingpad { i8*, i32 } catch i8* @SomeGlobal
-  ret i32 1
-
-exit:
-  ret i32 0
-}
-
-declare dso_local void @errorbar() local_unnamed_addr
-
-declare dso_local i32 @__gxx_personality_sj0(...)


        


More information about the llvm-branch-commits mailing list