[llvm] r331203 - [SystemZ] Handle SADDO et.al. and ADD/SUBCARRY

Ulrich Weigand via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 30 10:54:28 PDT 2018


Author: uweigand
Date: Mon Apr 30 10:54:28 2018
New Revision: 331203

URL: http://llvm.org/viewvc/llvm-project?rev=331203&view=rev
Log:
[SystemZ] Handle SADDO et.al. and ADD/SUBCARRY

This provides an optimized implementation of SADDO/SSUBO/UADDO/USUBO
as well as ADDCARRY/SUBCARRY on top of the new CC implementation.

In particular, multi-word arithmetic now uses UADDO/ADDCARRY instead
of the old ADDC/ADDE logic, which means we no longer need to use
"glue" links for those instructions.  This also allows making full
use of the memory-based instructions like ALSI, which couldn't be
recognized due to limitations in the DAG matcher previously.

Also, the llvm.sadd.with.overflow et al. intrinsics now expand to
directly using the ADD instructions and checking for a CC 3 result.


Added:
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-01.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-02.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-03.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-04.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-05.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-06.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-07.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-08.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sadd-09.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-01.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-02.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-03.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-04.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-05.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-06.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-07.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-08.ll
    llvm/trunk/test/CodeGen/SystemZ/int-ssub-09.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-01.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-02.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-03.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-04.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-05.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-06.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-07.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-08.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-09.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-10.ll
    llvm/trunk/test/CodeGen/SystemZ/int-uadd-11.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-01.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-02.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-03.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-04.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-05.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-06.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-07.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-08.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-09.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-10.ll
    llvm/trunk/test/CodeGen/SystemZ/int-usub-11.ll
Modified:
    llvm/trunk/lib/Target/SystemZ/SystemZ.h
    llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
    llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
    llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h
    llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp
    llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td
    llvm/trunk/lib/Target/SystemZ/SystemZOperands.td
    llvm/trunk/lib/Target/SystemZ/SystemZOperators.td
    llvm/trunk/test/CodeGen/SystemZ/asm-18.ll
    llvm/trunk/test/CodeGen/SystemZ/int-add-08.ll
    llvm/trunk/test/CodeGen/SystemZ/int-sub-05.ll

Modified: llvm/trunk/lib/Target/SystemZ/SystemZ.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZ.h?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZ.h (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZ.h Mon Apr 30 10:54:28 2018
@@ -47,6 +47,22 @@ const unsigned CCMASK_CMP_O  = CCMASK_AN
 const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2;
 const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
 
+// Condition-code mask assignments for arithmetical operations.
+const unsigned CCMASK_ARITH_EQ       = CCMASK_0;
+const unsigned CCMASK_ARITH_LT       = CCMASK_1;
+const unsigned CCMASK_ARITH_GT       = CCMASK_2;
+const unsigned CCMASK_ARITH_OVERFLOW = CCMASK_3;
+const unsigned CCMASK_ARITH          = CCMASK_ANY;
+
+// Condition-code mask assignments for logical operations.
+const unsigned CCMASK_LOGICAL_ZERO     = CCMASK_0 | CCMASK_2;
+const unsigned CCMASK_LOGICAL_NONZERO  = CCMASK_1 | CCMASK_2;
+const unsigned CCMASK_LOGICAL_CARRY    = CCMASK_2 | CCMASK_3;
+const unsigned CCMASK_LOGICAL_NOCARRY  = CCMASK_0 | CCMASK_1;
+const unsigned CCMASK_LOGICAL_BORROW   = CCMASK_LOGICAL_NOCARRY;
+const unsigned CCMASK_LOGICAL_NOBORROW = CCMASK_LOGICAL_CARRY;
+const unsigned CCMASK_LOGICAL          = CCMASK_ANY;
+
 // Condition-code mask assignments for CS.
 const unsigned CCMASK_CS_EQ = CCMASK_0;
 const unsigned CCMASK_CS_NE = CCMASK_1;

Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp Mon Apr 30 10:54:28 2018
@@ -310,6 +310,11 @@ class SystemZDAGToDAGISel : public Selec
   // Try to use scatter instruction Opcode to implement store Store.
   bool tryScatter(StoreSDNode *Store, unsigned Opcode);
 
+  // Change a chain of {load; op; store} of the same value into a simple op
+  // through memory of that value, if the uses of the modified value and its
+  // address are suitable.
+  bool tryFoldLoadStoreIntoMemOperand(SDNode *Node);
+
   // Return true if Load and Store are loads and stores of the same size
   // and are guaranteed not to overlap.  Such operations can be implemented
   // using block (SS-format) instructions.
@@ -1196,6 +1201,171 @@ bool SystemZDAGToDAGISel::tryScatter(Sto
   return true;
 }
 
+// Check whether or not the chain ending in StoreNode is suitable for doing
+// the {load; op; store} to modify transformation.
+static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
+                                        SDValue StoredVal, SelectionDAG *CurDAG,
+                                        LoadSDNode *&LoadNode,
+                                        SDValue &InputChain) {
+  // Is the stored value result 0 of the operation?
+  if (StoredVal.getResNo() != 0)
+    return false;
+
+  // Are there other uses of the loaded value than the operation?
+  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0))
+    return false;
+
+  // Is the store non-extending and non-indexed?
+  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
+    return false;
+
+  SDValue Load = StoredVal->getOperand(0);
+  // Is the stored value a non-extending and non-indexed load?
+  if (!ISD::isNormalLoad(Load.getNode()))
+    return false;
+
+  // Return LoadNode by reference.
+  LoadNode = cast<LoadSDNode>(Load);
+
+  // Is store the only read of the loaded value?
+  if (!Load.hasOneUse())
+    return false;
+
+  // Is the address of the store the same as the load?
+  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
+      LoadNode->getOffset() != StoreNode->getOffset())
+    return false;
+
+  // Check if the chain is produced by the load or is a TokenFactor with
+  // the load output chain as an operand. Return InputChain by reference.
+  SDValue Chain = StoreNode->getChain();
+
+  bool ChainCheck = false;
+  if (Chain == Load.getValue(1)) {
+    ChainCheck = true;
+    InputChain = LoadNode->getChain();
+  } else if (Chain.getOpcode() == ISD::TokenFactor) {
+    SmallVector<SDValue, 4> ChainOps;
+    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
+      SDValue Op = Chain.getOperand(i);
+      if (Op == Load.getValue(1)) {
+        ChainCheck = true;
+        // Drop Load, but keep its chain. No cycle check necessary.
+        ChainOps.push_back(Load.getOperand(0));
+        continue;
+      }
+
+      // Make sure using Op as part of the chain would not cause a cycle here.
+      // In theory, we could check whether the chain node is a predecessor of
+      // the load. But that can be very expensive. Instead visit the uses and
+      // make sure they all have smaller node id than the load.
+      int LoadId = LoadNode->getNodeId();
+      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+             UE = UI->use_end(); UI != UE; ++UI) {
+        if (UI.getUse().getResNo() != 0)
+          continue;
+        if (UI->getNodeId() > LoadId)
+          return false;
+      }
+
+      ChainOps.push_back(Op);
+    }
+
+    if (ChainCheck)
+      // Make a new TokenFactor with all the other input chains except
+      // for the load.
+      InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
+                                   MVT::Other, ChainOps);
+  }
+  if (!ChainCheck)
+    return false;
+
+  return true;
+}
+
+// Change a chain of {load; op; store} of the same value into a simple op
+// through memory of that value, if the uses of the modified value and its
+// address are suitable.
+//
+// The tablegen pattern memory operand pattern is currently not able to match
+// the case where the CC on the original operation are used.
+//
+// See the equivalent routine in X86ISelDAGToDAG for further comments.
+bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
+  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
+  SDValue StoredVal = StoreNode->getOperand(1);
+  unsigned Opc = StoredVal->getOpcode();
+  SDLoc DL(StoreNode);
+
+  // Before we try to select anything, make sure this is memory operand size
+  // and opcode we can handle. Note that this must match the code below that
+  // actually lowers the opcodes.
+  EVT MemVT = StoreNode->getMemoryVT();
+  unsigned NewOpc = 0;
+  bool NegateOperand = false;
+  switch (Opc) {
+  default:
+    return false;
+  case SystemZISD::SSUBO:
+    NegateOperand = true;
+    /* fall through */
+  case SystemZISD::SADDO:
+    if (MemVT == MVT::i32)
+      NewOpc = SystemZ::ASI;
+    else if (MemVT == MVT::i64)
+      NewOpc = SystemZ::AGSI;
+    else
+      return false;
+    break;
+  case SystemZISD::USUBO:
+    NegateOperand = true;
+    /* fall through */
+  case SystemZISD::UADDO:
+    if (MemVT == MVT::i32)
+      NewOpc = SystemZ::ALSI;
+    else if (MemVT == MVT::i64)
+      NewOpc = SystemZ::ALGSI;
+    else
+      return false;
+    break;
+  }
+
+  LoadSDNode *LoadNode = nullptr;
+  SDValue InputChain;
+  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
+                                   InputChain))
+    return false;
+
+  SDValue Operand = StoredVal.getOperand(1);
+  auto *OperandC = dyn_cast<ConstantSDNode>(Operand);
+  if (!OperandC)
+    return false;
+  auto OperandV = OperandC->getAPIntValue();
+  if (NegateOperand)
+    OperandV = -OperandV;
+  if (OperandV.getMinSignedBits() > 8)
+    return false;
+  Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);
+
+  SDValue Base, Disp;
+  if (!selectBDAddr20Only(StoreNode->getBasePtr(), Base, Disp))
+    return false;
+
+  SDValue Ops[] = { Base, Disp, Operand, InputChain };
+  MachineSDNode *Result =
+    CurDAG->getMachineNode(NewOpc, DL, MVT::i32, MVT::Other, Ops);
+
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
+  MemOp[0] = StoreNode->getMemOperand();
+  MemOp[1] = LoadNode->getMemOperand();
+  Result->setMemRefs(MemOp, MemOp + 2);
+
+  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
+  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
+  CurDAG->RemoveDeadNode(Node);
+  return true;
+}
+
 bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
                                                LoadSDNode *Load) const {
   // Check that the two memory operands have the same size.
@@ -1358,6 +1528,8 @@ void SystemZDAGToDAGISel::Select(SDNode
   }
 
   case ISD::STORE: {
+    if (tryFoldLoadStoreIntoMemOperand(Node))
+      return;
     auto *Store = cast<StoreSDNode>(Node);
     unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
     if (ElemBitSize == 32) {

Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp Mon Apr 30 10:54:28 2018
@@ -164,6 +164,18 @@ SystemZTargetLowering::SystemZTargetLowe
       setOperationAction(ISD::SDIVREM, VT, Custom);
       setOperationAction(ISD::UDIVREM, VT, Custom);
 
+      // Support addition/subtraction with overflow.
+      setOperationAction(ISD::SADDO, VT, Custom);
+      setOperationAction(ISD::SSUBO, VT, Custom);
+
+      // Support addition/subtraction with carry.
+      setOperationAction(ISD::UADDO, VT, Custom);
+      setOperationAction(ISD::USUBO, VT, Custom);
+
+      // Support carry in as value rather than glue.
+      setOperationAction(ISD::ADDCARRY, VT, Custom);
+      setOperationAction(ISD::SUBCARRY, VT, Custom);
+
       // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
       // stores, putting a serialization instruction after the stores.
       setOperationAction(ISD::ATOMIC_LOAD,  VT, Custom);
@@ -3204,6 +3216,99 @@ SDValue SystemZTargetLowering::lowerOR(S
                                    MVT::i64, HighOp, Low32);
 }
 
+// Lower SADDO/SSUBO/UADDO/USUBO nodes.
+SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
+                                          SelectionDAG &DAG) const {
+  SDNode *N = Op.getNode();
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  SDLoc DL(N);
+  unsigned BaseOp = 0;
+  unsigned CCValid = 0;
+  unsigned CCMask = 0;
+
+  switch (Op.getOpcode()) {
+  default: llvm_unreachable("Unknown instruction!");
+  case ISD::SADDO:
+    BaseOp = SystemZISD::SADDO;
+    CCValid = SystemZ::CCMASK_ARITH;
+    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
+    break;
+  case ISD::SSUBO:
+    BaseOp = SystemZISD::SSUBO;
+    CCValid = SystemZ::CCMASK_ARITH;
+    CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
+    break;
+  case ISD::UADDO:
+    BaseOp = SystemZISD::UADDO;
+    CCValid = SystemZ::CCMASK_LOGICAL;
+    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
+    break;
+  case ISD::USUBO:
+    BaseOp = SystemZISD::USUBO;
+    CCValid = SystemZ::CCMASK_LOGICAL;
+    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
+    break;
+  }
+
+  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
+  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
+
+  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
+  if (N->getValueType(1) == MVT::i1)
+    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+
+  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
+}
+
+// Lower ADDCARRY/SUBCARRY nodes.
+SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
+                                                SelectionDAG &DAG) const {
+
+  SDNode *N = Op.getNode();
+  MVT VT = N->getSimpleValueType(0);
+
+  // Let legalize expand this if it isn't a legal type yet.
+  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+    return SDValue();
+
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  SDValue Carry = Op.getOperand(2);
+  SDLoc DL(N);
+  unsigned BaseOp = 0;
+  unsigned CCValid = 0;
+  unsigned CCMask = 0;
+
+  switch (Op.getOpcode()) {
+  default: llvm_unreachable("Unknown instruction!");
+  case ISD::ADDCARRY:
+    BaseOp = SystemZISD::ADDCARRY;
+    CCValid = SystemZ::CCMASK_LOGICAL;
+    CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
+    break;
+  case ISD::SUBCARRY:
+    BaseOp = SystemZISD::SUBCARRY;
+    CCValid = SystemZ::CCMASK_LOGICAL;
+    CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
+    break;
+  }
+
+  // Set the condition code from the carry flag.
+  Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
+                      DAG.getConstant(CCValid, DL, MVT::i32),
+                      DAG.getConstant(CCMask, DL, MVT::i32));
+
+  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
+  SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);
+
+  SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
+  if (N->getValueType(1) == MVT::i1)
+    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+
+  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
+}
+
 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                           SelectionDAG &DAG) const {
   EVT VT = Op.getValueType();
@@ -4693,6 +4798,14 @@ SDValue SystemZTargetLowering::LowerOper
     return lowerSDIVREM(Op, DAG);
   case ISD::UDIVREM:
     return lowerUDIVREM(Op, DAG);
+  case ISD::SADDO:
+  case ISD::SSUBO:
+  case ISD::UADDO:
+  case ISD::USUBO:
+    return lowerXALUO(Op, DAG);
+  case ISD::ADDCARRY:
+  case ISD::SUBCARRY:
+    return lowerADDSUBCARRY(Op, DAG);
   case ISD::OR:
     return lowerOR(Op, DAG);
   case ISD::CTPOP:
@@ -4871,6 +4984,13 @@ const char *SystemZTargetLowering::getTa
     OPCODE(UMUL_LOHI);
     OPCODE(SDIVREM);
     OPCODE(UDIVREM);
+    OPCODE(SADDO);
+    OPCODE(SSUBO);
+    OPCODE(UADDO);
+    OPCODE(USUBO);
+    OPCODE(ADDCARRY);
+    OPCODE(SUBCARRY);
+    OPCODE(GET_CCMASK);
     OPCODE(MVC);
     OPCODE(MVC_LOOP);
     OPCODE(NC);
@@ -5560,6 +5680,48 @@ SDValue SystemZTargetLowering::combineSE
   return SDValue();
 }
 
+
+SDValue SystemZTargetLowering::combineGET_CCMASK(
+    SDNode *N, DAGCombinerInfo &DCI) const {
+
+  // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible
+  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
+  if (!CCValid || !CCMask)
+    return SDValue();
+  int CCValidVal = CCValid->getZExtValue();
+  int CCMaskVal = CCMask->getZExtValue();
+
+  SDValue Select = N->getOperand(0);
+  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
+    return SDValue();
+
+  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
+  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
+  if (!SelectCCValid || !SelectCCMask)
+    return SDValue();
+  int SelectCCValidVal = SelectCCValid->getZExtValue();
+  int SelectCCMaskVal = SelectCCMask->getZExtValue();
+
+  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
+  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
+  if (!TrueVal || !FalseVal)
+    return SDValue();
+  if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
+    ;
+  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
+    SelectCCMaskVal ^= SelectCCValidVal;
+  else
+    return SDValue();
+
+  if (SelectCCValidVal & ~CCValidVal)
+    return SDValue();
+  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
+    return SDValue();
+
+  return Select->getOperand(4);
+}
+
 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
   switch(N->getOpcode()) {
@@ -5580,6 +5742,7 @@ SDValue SystemZTargetLowering::PerformDA
   case ISD::ROTL:               return combineSHIFTROT(N, DCI);
   case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
   case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
+  case SystemZISD::GET_CCMASK:  return combineGET_CCMASK(N, DCI);
   }
 
   return SDValue();

Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.h Mon Apr 30 10:54:28 2018
@@ -93,6 +93,19 @@ enum NodeType : unsigned {
   SDIVREM,
   UDIVREM,
 
+  // Add/subtract with overflow/carry.  These have the same operands as
+  // the corresponding standard operations, except with the carry flag
+  // replaced by a condition code value.
+  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,
+
+  // Set the condition code from a boolean value in operand 0.
+  // Operand 1 is a mask of all condition-code values that may result of this
+  // operation, operand 2 is a mask of condition-code values that may result
+  // if the boolean is true.
+  // Note that this operation is always optimized away, we will never
+  // generate any code for it.
+  GET_CCMASK,
+
   // Use a series of MVCs to copy bytes from one memory location to another.
   // The operands are:
   // - the target address
@@ -548,6 +561,8 @@ private:
   SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
@@ -590,6 +605,7 @@ private:
   SDValue combineSHIFTROT(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
 
   // If the last instruction before MBBI in MBB was some form of COMPARE,
   // try to replace it with a COMPARE AND BRANCH just before MBBI.

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.cpp Mon Apr 30 10:54:28 2018
@@ -1191,6 +1191,36 @@ MachineInstr *SystemZInstrInfo::foldMemo
     return BuiltMI;
   }
 
+  if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
+       isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
+      (Opcode == SystemZ::ALGFI && OpNum == 0 &&
+       isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
+    // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
+    Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
+    MachineInstr *BuiltMI =
+        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
+            .addFrameIndex(FrameIndex)
+            .addImm(0)
+            .addImm((int8_t)MI.getOperand(2).getImm());
+    transferDeadCC(&MI, BuiltMI);
+    return BuiltMI;
+  }
+
+  if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
+       isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
+      (Opcode == SystemZ::SLGFI && OpNum == 0 &&
+       isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
+    // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
+    Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
+    MachineInstr *BuiltMI =
+        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
+            .addFrameIndex(FrameIndex)
+            .addImm(0)
+            .addImm((int8_t)-MI.getOperand(2).getImm());
+    transferDeadCC(&MI, BuiltMI);
+    return BuiltMI;
+  }
+
   if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
     bool Op0IsGPR = (Opcode == SystemZ::LGDR);
     bool Op1IsGPR = (Opcode == SystemZ::LDGR);

Modified: llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZInstrInfo.td Mon Apr 30 10:54:28 2018
@@ -891,12 +891,12 @@ def : Pat<(or (zext32 GR32:$src), imm64h
 // Addition
 //===----------------------------------------------------------------------===//
 
-// Plain addition.
+// Addition producing a signed overflow flag.
 let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
   // Addition of a register.
   let isCommutable = 1 in {
-    defm AR : BinaryRRAndK<"ar", 0x1A, 0xB9F8, add, GR32, GR32>;
-    defm AGR : BinaryRREAndK<"agr", 0xB908, 0xB9E8, add, GR64, GR64>;
+    defm AR : BinaryRRAndK<"ar", 0x1A, 0xB9F8, z_saddo, GR32, GR32>;
+    defm AGR : BinaryRREAndK<"agr", 0xB908, 0xB9E8, z_saddo, GR64, GR64>;
   }
   def AGFR : BinaryRRE<"agfr", 0xB918, null_frag, GR64, GR32>;
 
@@ -907,38 +907,38 @@ let Defs = [CC], CCValues = 0xF, Compare
               Requires<[FeatureHighWord]>;
 
   // Addition of signed 16-bit immediates.
-  defm AHIMux : BinaryRIAndKPseudo<"ahimux", add, GRX32, imm32sx16>;
-  defm AHI  : BinaryRIAndK<"ahi",  0xA7A, 0xECD8, add, GR32, imm32sx16>;
-  defm AGHI : BinaryRIAndK<"aghi", 0xA7B, 0xECD9, add, GR64, imm64sx16>;
+  defm AHIMux : BinaryRIAndKPseudo<"ahimux", z_saddo, GRX32, imm32sx16>;
+  defm AHI  : BinaryRIAndK<"ahi",  0xA7A, 0xECD8, z_saddo, GR32, imm32sx16>;
+  defm AGHI : BinaryRIAndK<"aghi", 0xA7B, 0xECD9, z_saddo, GR64, imm64sx16>;
 
   // Addition of signed 32-bit immediates.
-  def AFIMux : BinaryRIPseudo<add, GRX32, simm32>,
+  def AFIMux : BinaryRIPseudo<z_saddo, GRX32, simm32>,
                Requires<[FeatureHighWord]>;
-  def AFI  : BinaryRIL<"afi",  0xC29, add, GR32, simm32>;
-  def AIH  : BinaryRIL<"aih",  0xCC8, add, GRH32, simm32>,
+  def AFI  : BinaryRIL<"afi",  0xC29, z_saddo, GR32, simm32>;
+  def AIH  : BinaryRIL<"aih",  0xCC8, z_saddo, GRH32, simm32>,
              Requires<[FeatureHighWord]>;
-  def AGFI : BinaryRIL<"agfi", 0xC28, add, GR64, imm64sx32>;
+  def AGFI : BinaryRIL<"agfi", 0xC28, z_saddo, GR64, imm64sx32>;
 
   // Addition of memory.
-  defm AH  : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, asextloadi16, 2>;
-  defm A   : BinaryRXPair<"a",  0x5A, 0xE35A, add, GR32, load, 4>;
-  def  AGH : BinaryRXY<"agh", 0xE338, add, GR64, asextloadi16, 2>,
+  defm AH  : BinaryRXPair<"ah", 0x4A, 0xE37A, z_saddo, GR32, asextloadi16, 2>;
+  defm A   : BinaryRXPair<"a",  0x5A, 0xE35A, z_saddo, GR32, load, 4>;
+  def  AGH : BinaryRXY<"agh", 0xE338, z_saddo, GR64, asextloadi16, 2>,
              Requires<[FeatureMiscellaneousExtensions2]>;
-  def  AGF : BinaryRXY<"agf", 0xE318, add, GR64, asextloadi32, 4>;
-  def  AG  : BinaryRXY<"ag",  0xE308, add, GR64, load, 8>;
+  def  AGF : BinaryRXY<"agf", 0xE318, z_saddo, GR64, asextloadi32, 4>;
+  def  AG  : BinaryRXY<"ag",  0xE308, z_saddo, GR64, load, 8>;
 
   // Addition to memory.
-  def ASI  : BinarySIY<"asi",  0xEB6A, add, imm32sx8>;
-  def AGSI : BinarySIY<"agsi", 0xEB7A, add, imm64sx8>;
+  def ASI  : BinarySIY<"asi",  0xEB6A, null_frag, imm32sx8>;
+  def AGSI : BinarySIY<"agsi", 0xEB7A, null_frag, imm64sx8>;
 }
-defm : SXB<add, GR64, AGFR>;
+defm : SXB<z_saddo, GR64, AGFR>;
 
 // Addition producing a carry.
 let Defs = [CC] in {
   // Addition of a register.
   let isCommutable = 1 in {
-    defm ALR : BinaryRRAndK<"alr", 0x1E, 0xB9FA, addc, GR32, GR32>;
-    defm ALGR : BinaryRREAndK<"algr", 0xB90A, 0xB9EA, addc, GR64, GR64>;
+    defm ALR : BinaryRRAndK<"alr", 0x1E, 0xB9FA, z_uaddo, GR32, GR32>;
+    defm ALGR : BinaryRREAndK<"algr", 0xB90A, 0xB9EA, z_uaddo, GR64, GR64>;
   }
   def ALGFR : BinaryRRE<"algfr", 0xB91A, null_frag, GR64, GR32>;
 
@@ -949,56 +949,104 @@ let Defs = [CC] in {
                Requires<[FeatureHighWord]>;
 
   // Addition of signed 16-bit immediates.
-  def ALHSIK  : BinaryRIE<"alhsik",  0xECDA, addc, GR32, imm32sx16>,
+  def ALHSIK  : BinaryRIE<"alhsik",  0xECDA, z_uaddo, GR32, imm32sx16>,
                 Requires<[FeatureDistinctOps]>;
-  def ALGHSIK : BinaryRIE<"alghsik", 0xECDB, addc, GR64, imm64sx16>,
+  def ALGHSIK : BinaryRIE<"alghsik", 0xECDB, z_uaddo, GR64, imm64sx16>,
                 Requires<[FeatureDistinctOps]>;
 
   // Addition of unsigned 32-bit immediates.
-  def ALFI  : BinaryRIL<"alfi",  0xC2B, addc, GR32, uimm32>;
-  def ALGFI : BinaryRIL<"algfi", 0xC2A, addc, GR64, imm64zx32>;
+  def ALFI  : BinaryRIL<"alfi",  0xC2B, z_uaddo, GR32, uimm32>;
+  def ALGFI : BinaryRIL<"algfi", 0xC2A, z_uaddo, GR64, imm64zx32>;
 
   // Addition of signed 32-bit immediates.
   def ALSIH : BinaryRIL<"alsih", 0xCCA, null_frag, GRH32, simm32>,
               Requires<[FeatureHighWord]>;
 
   // Addition of memory.
-  defm AL   : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load, 4>;
-  def  ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, azextloadi32, 4>;
-  def  ALG  : BinaryRXY<"alg",  0xE30A, addc, GR64, load, 8>;
+  defm AL   : BinaryRXPair<"al", 0x5E, 0xE35E, z_uaddo, GR32, load, 4>;
+  def  ALGF : BinaryRXY<"algf", 0xE31A, z_uaddo, GR64, azextloadi32, 4>;
+  def  ALG  : BinaryRXY<"alg",  0xE30A, z_uaddo, GR64, load, 8>;
 
   // Addition to memory.
   def ALSI  : BinarySIY<"alsi",  0xEB6E, null_frag, imm32sx8>;
   def ALGSI : BinarySIY<"algsi", 0xEB7E, null_frag, imm64sx8>;
 }
-defm : ZXB<addc, GR64, ALGFR>;
+defm : ZXB<z_uaddo, GR64, ALGFR>;
 
 // Addition producing and using a carry.
 let Defs = [CC], Uses = [CC] in {
   // Addition of a register.
-  def ALCR  : BinaryRRE<"alcr",  0xB998, adde, GR32, GR32>;
-  def ALCGR : BinaryRRE<"alcgr", 0xB988, adde, GR64, GR64>;
+  def ALCR  : BinaryRRE<"alcr",  0xB998, z_addcarry, GR32, GR32>;
+  def ALCGR : BinaryRRE<"alcgr", 0xB988, z_addcarry, GR64, GR64>;
 
   // Addition of memory.
-  def ALC  : BinaryRXY<"alc",  0xE398, adde, GR32, load, 4>;
-  def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load, 8>;
+  def ALC  : BinaryRXY<"alc",  0xE398, z_addcarry, GR32, load, 4>;
+  def ALCG : BinaryRXY<"alcg", 0xE388, z_addcarry, GR64, load, 8>;
 }
 
 // Addition that does not modify the condition code.
 def ALSIHN : BinaryRIL<"alsihn", 0xCCB, null_frag, GRH32, simm32>,
              Requires<[FeatureHighWord]>;
 
+// Map plain addition to either arithmetic or logical operation.
+
+def : Pat<(add GR32:$src1, GR32:$src2),
+          (AR GR32:$src1, GR32:$src2)>;
+def : Pat<(add GR64:$src1, GR64:$src2),
+          (AGR GR64:$src1, GR64:$src2)>;
+defm : SXB<add, GR64, AGFR>;
+defm : ZXB<add, GR64, ALGFR>;
+
+def : Pat<(add GRX32:$src1, imm32sx16:$src2),
+          (AHIMux GRX32:$src1, imm32sx16:$src2)>, Requires<[FeatureHighWord]>;
+def : Pat<(add GR32:$src1, imm32sx16:$src2),
+          (AHI GR32:$src1, imm32sx16:$src2)>;
+def : Pat<(add GR64:$src1, imm64sx16:$src2),
+          (AGHI GR64:$src1, imm64sx16:$src2)>;
+def : Pat<(add GRX32:$src1, simm32:$src2),
+          (AFIMux GRX32:$src1, simm32:$src2)>, Requires<[FeatureHighWord]>;
+def : Pat<(add GR32:$src1, simm32:$src2),
+          (AFI GR32:$src1, simm32:$src2)>;
+def : Pat<(add GRH32:$src1, simm32:$src2),
+          (AIH GRH32:$src1, simm32:$src2)>, Requires<[FeatureHighWord]>;
+def : Pat<(add GR64:$src1, imm64sx32:$src2),
+          (AGFI GR64:$src1, imm64sx32:$src2)>;
+def : Pat<(add GR64:$src1, imm64zx32:$src2),
+          (ALGFI GR64:$src1, imm64zx32:$src2)>;
+
+def : Pat<(add GR32:$src1, (asextloadi16 bdxaddr12pair:$addr)),
+          (AH GR32:$src1, bdxaddr12pair:$addr)>;
+def : Pat<(add GR32:$src1, (asextloadi16 bdxaddr20pair:$addr)),
+          (AHY GR32:$src1, bdxaddr20pair:$addr)>;
+def : Pat<(add GR32:$src1, (load bdxaddr12pair:$addr)),
+          (A GR32:$src1, bdxaddr12pair:$addr)>;
+def : Pat<(add GR32:$src1, (load bdxaddr20pair:$addr)),
+          (AY GR32:$src1, bdxaddr20pair:$addr)>;
+def : Pat<(add GR64:$src1, (asextloadi16 bdxaddr20only:$addr)),
+          (AGH GR64:$src1, bdxaddr20only:$addr)>,
+      Requires<[FeatureMiscellaneousExtensions2]>;
+def : Pat<(add GR64:$src1, (asextloadi32 bdxaddr20only:$addr)),
+          (AGF GR64:$src1, bdxaddr20only:$addr)>;
+def : Pat<(add GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
+          (ALGF GR64:$src1, bdxaddr20only:$addr)>;
+def : Pat<(add GR64:$src1, (load bdxaddr20only:$addr)),
+          (AG GR64:$src1, bdxaddr20only:$addr)>;
+
+def : Pat<(store (add (load bdaddr20only:$addr), imm32sx8:$src2), bdaddr20only:$addr),
+          (ASI bdaddr20only:$addr, imm32sx8:$src2)>;
+def : Pat<(store (add (load bdaddr20only:$addr), imm64sx8:$src2), bdaddr20only:$addr),
+          (AGSI bdaddr20only:$addr, imm64sx8:$src2)>;
+
 //===----------------------------------------------------------------------===//
 // Subtraction
 //===----------------------------------------------------------------------===//
 
-// Plain subtraction.  Although immediate forms exist, we use the
-// add-immediate instruction instead.
+// Subtraction producing a signed overflow flag.
 let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
   // Subtraction of a register.
-  defm SR : BinaryRRAndK<"sr", 0x1B, 0xB9F9, sub, GR32, GR32>;
+  defm SR : BinaryRRAndK<"sr", 0x1B, 0xB9F9, z_ssubo, GR32, GR32>;
   def SGFR : BinaryRRE<"sgfr", 0xB919, null_frag, GR64, GR32>;
-  defm SGR : BinaryRREAndK<"sgr", 0xB909, 0xB9E9, sub, GR64, GR64>;
+  defm SGR : BinaryRREAndK<"sgr", 0xB909, 0xB9E9, z_ssubo, GR64, GR64>;
 
   // Subtraction from a high register.
   def SHHHR : BinaryRRFa<"shhhr", 0xB9C9, null_frag, GRH32, GRH32, GRH32>,
@@ -1007,21 +1055,39 @@ let Defs = [CC], CCValues = 0xF, Compare
               Requires<[FeatureHighWord]>;
 
   // Subtraction of memory.
-  defm SH  : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, asextloadi16, 2>;
-  defm S   : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load, 4>;
-  def  SGH : BinaryRXY<"sgh", 0xE339, sub, GR64, asextloadi16, 2>,
+  defm SH  : BinaryRXPair<"sh", 0x4B, 0xE37B, z_ssubo, GR32, asextloadi16, 2>;
+  defm S   : BinaryRXPair<"s", 0x5B, 0xE35B, z_ssubo, GR32, load, 4>;
+  def  SGH : BinaryRXY<"sgh", 0xE339, z_ssubo, GR64, asextloadi16, 2>,
              Requires<[FeatureMiscellaneousExtensions2]>;
-  def  SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, asextloadi32, 4>;
-  def  SG  : BinaryRXY<"sg",  0xE309, sub, GR64, load, 8>;
+  def  SGF : BinaryRXY<"sgf", 0xE319, z_ssubo, GR64, asextloadi32, 4>;
+  def  SG  : BinaryRXY<"sg",  0xE309, z_ssubo, GR64, load, 8>;
+}
+defm : SXB<z_ssubo, GR64, SGFR>;
+
+// Subtracting an immediate is the same as adding the negated immediate.
+let AddedComplexity = 1 in {
+  def : Pat<(z_ssubo GR32:$src1, imm32sx16n:$src2),
+            (AHIMux GR32:$src1, imm32sx16n:$src2)>,
+        Requires<[FeatureHighWord]>;
+  def : Pat<(z_ssubo GR32:$src1, simm32n:$src2),
+            (AFIMux GR32:$src1, simm32n:$src2)>,
+        Requires<[FeatureHighWord]>;
+  def : Pat<(z_ssubo GR32:$src1, imm32sx16n:$src2),
+            (AHI GR32:$src1, imm32sx16n:$src2)>;
+  def : Pat<(z_ssubo GR32:$src1, simm32n:$src2),
+            (AFI GR32:$src1, simm32n:$src2)>;
+  def : Pat<(z_ssubo GR64:$src1, imm64sx16n:$src2),
+            (AGHI GR64:$src1, imm64sx16n:$src2)>;
+  def : Pat<(z_ssubo GR64:$src1, imm64sx32n:$src2),
+            (AGFI GR64:$src1, imm64sx32n:$src2)>;
 }
-defm : SXB<sub, GR64, SGFR>;
 
 // Subtraction producing a carry.
 let Defs = [CC] in {
   // Subtraction of a register.
-  defm SLR : BinaryRRAndK<"slr", 0x1F, 0xB9FB, subc, GR32, GR32>;
+  defm SLR : BinaryRRAndK<"slr", 0x1F, 0xB9FB, z_usubo, GR32, GR32>;
   def SLGFR : BinaryRRE<"slgfr", 0xB91B, null_frag, GR64, GR32>;
-  defm SLGR : BinaryRREAndK<"slgr", 0xB90B, 0xB9EB, subc, GR64, GR64>;
+  defm SLGR : BinaryRREAndK<"slgr", 0xB90B, 0xB9EB, z_usubo, GR64, GR64>;
 
   // Subtraction from a high register.
   def SLHHHR : BinaryRRFa<"slhhhr", 0xB9CB, null_frag, GRH32, GRH32, GRH32>,
@@ -1029,29 +1095,68 @@ let Defs = [CC] in {
   def SLHHLR : BinaryRRFa<"slhhlr", 0xB9DB, null_frag, GRH32, GRH32, GR32>,
                Requires<[FeatureHighWord]>;
 
-  // Subtraction of unsigned 32-bit immediates.  These don't match
-  // subc because we prefer addc for constants.
-  def SLFI  : BinaryRIL<"slfi",  0xC25, null_frag, GR32, uimm32>;
-  def SLGFI : BinaryRIL<"slgfi", 0xC24, null_frag, GR64, imm64zx32>;
+  // Subtraction of unsigned 32-bit immediates.
+  def SLFI  : BinaryRIL<"slfi",  0xC25, z_usubo, GR32, uimm32>;
+  def SLGFI : BinaryRIL<"slgfi", 0xC24, z_usubo, GR64, imm64zx32>;
 
   // Subtraction of memory.
-  defm SL   : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load, 4>;
-  def  SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, azextloadi32, 4>;
-  def  SLG  : BinaryRXY<"slg",  0xE30B, subc, GR64, load, 8>;
+  defm SL   : BinaryRXPair<"sl", 0x5F, 0xE35F, z_usubo, GR32, load, 4>;
+  def  SLGF : BinaryRXY<"slgf", 0xE31B, z_usubo, GR64, azextloadi32, 4>;
+  def  SLG  : BinaryRXY<"slg",  0xE30B, z_usubo, GR64, load, 8>;
+}
+defm : ZXB<z_usubo, GR64, SLGFR>;
+
+// Subtracting an immediate is the same as adding the negated immediate.
+let AddedComplexity = 1 in {
+  def : Pat<(z_usubo GR32:$src1, imm32sx16n:$src2),
+            (ALHSIK GR32:$src1, imm32sx16n:$src2)>,
+        Requires<[FeatureDistinctOps]>;
+  def : Pat<(z_usubo GR64:$src1, imm64sx16n:$src2),
+            (ALGHSIK GR64:$src1, imm64sx16n:$src2)>,
+        Requires<[FeatureDistinctOps]>;
 }
-defm : ZXB<subc, GR64, SLGFR>;
 
 // Subtraction producing and using a carry.
 let Defs = [CC], Uses = [CC] in {
   // Subtraction of a register.
-  def SLBR  : BinaryRRE<"slbr",  0xB999, sube, GR32, GR32>;
-  def SLBGR : BinaryRRE<"slbgr", 0xB989, sube, GR64, GR64>;
+  def SLBR  : BinaryRRE<"slbr",  0xB999, z_subcarry, GR32, GR32>;
+  def SLBGR : BinaryRRE<"slbgr", 0xB989, z_subcarry, GR64, GR64>;
 
   // Subtraction of memory.
-  def SLB  : BinaryRXY<"slb",  0xE399, sube, GR32, load, 4>;
-  def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load, 8>;
+  def SLB  : BinaryRXY<"slb",  0xE399, z_subcarry, GR32, load, 4>;
+  def SLBG : BinaryRXY<"slbg", 0xE389, z_subcarry, GR64, load, 8>;
 }
 
+// Map plain subtraction to either arithmetic or logical operation.
+
+def : Pat<(sub GR32:$src1, GR32:$src2),
+          (SR GR32:$src1, GR32:$src2)>;
+def : Pat<(sub GR64:$src1, GR64:$src2),
+          (SGR GR64:$src1, GR64:$src2)>;
+defm : SXB<sub, GR64, SGFR>;
+defm : ZXB<sub, GR64, SLGFR>;
+
+def : Pat<(add GR64:$src1, imm64zx32n:$src2),
+          (SLGFI GR64:$src1, imm64zx32n:$src2)>;
+
+def : Pat<(sub GR32:$src1, (asextloadi16 bdxaddr12pair:$addr)),
+          (SH GR32:$src1, bdxaddr12pair:$addr)>;
+def : Pat<(sub GR32:$src1, (asextloadi16 bdxaddr20pair:$addr)),
+          (SHY GR32:$src1, bdxaddr20pair:$addr)>;
+def : Pat<(sub GR32:$src1, (load bdxaddr12pair:$addr)),
+          (S GR32:$src1, bdxaddr12pair:$addr)>;
+def : Pat<(sub GR32:$src1, (load bdxaddr20pair:$addr)),
+          (SY GR32:$src1, bdxaddr20pair:$addr)>;
+def : Pat<(sub GR64:$src1, (asextloadi16 bdxaddr20only:$addr)),
+          (SGH GR64:$src1, bdxaddr20only:$addr)>,
+      Requires<[FeatureMiscellaneousExtensions2]>;
+def : Pat<(sub GR64:$src1, (asextloadi32 bdxaddr20only:$addr)),
+          (SGF GR64:$src1, bdxaddr20only:$addr)>;
+def : Pat<(sub GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
+          (SLGF GR64:$src1, bdxaddr20only:$addr)>;
+def : Pat<(sub GR64:$src1, (load bdxaddr20only:$addr)),
+          (SG GR64:$src1, bdxaddr20only:$addr)>;
+
 //===----------------------------------------------------------------------===//
 // AND
 //===----------------------------------------------------------------------===//
@@ -2119,20 +2224,6 @@ let isCodeGenOnly = 1, hasSideEffects =
 // Peepholes.
 //===----------------------------------------------------------------------===//
 
-// Use AL* for GR64 additions of unsigned 32-bit values.
-defm : ZXB<add, GR64, ALGFR>;
-def  : Pat<(add GR64:$src1, imm64zx32:$src2),
-           (ALGFI GR64:$src1, imm64zx32:$src2)>;
-def  : Pat<(add GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
-           (ALGF GR64:$src1, bdxaddr20only:$addr)>;
-
-// Use SL* for GR64 subtractions of unsigned 32-bit values.
-defm : ZXB<sub, GR64, SLGFR>;
-def  : Pat<(add GR64:$src1, imm64zx32n:$src2),
-           (SLGFI GR64:$src1, imm64zx32n:$src2)>;
-def  : Pat<(sub GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
-           (SLGF GR64:$src1, bdxaddr20only:$addr)>;
-
 // Avoid generating 2 XOR instructions. (xor (and x, y), y) is
 // equivalent to (and (xor x, -1), y)
 def : Pat<(and (xor GR64:$x, (i64 -1)), GR64:$y),

Modified: llvm/trunk/lib/Target/SystemZ/SystemZOperands.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZOperands.td?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZOperands.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZOperands.td Mon Apr 30 10:54:28 2018
@@ -219,6 +219,12 @@ def SIMM16 : SDNodeXForm<imm, [{
                                    MVT::i64);
 }]>;
 
+// Negate and then truncate an immediate to a 16-bit signed quantity.
+def NEGSIMM16 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(int16_t(-N->getZExtValue()), SDLoc(N),
+                                   MVT::i64);
+}]>;
+
 // Truncate an immediate to a 16-bit unsigned quantity.
 def UIMM16 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(uint16_t(N->getZExtValue()), SDLoc(N),
@@ -231,24 +237,30 @@ def SIMM32 : SDNodeXForm<imm, [{
                                    MVT::i64);
 }]>;
 
+// Negate and then truncate an immediate to a 32-bit signed quantity.
+def NEGSIMM32 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(int32_t(-N->getZExtValue()), SDLoc(N),
+                                   MVT::i64);
+}]>;
+
 // Truncate an immediate to a 32-bit unsigned quantity.
 def UIMM32 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(uint32_t(N->getZExtValue()), SDLoc(N),
                                    MVT::i64);
 }]>;
 
+// Negate and then truncate an immediate to a 32-bit unsigned quantity.
+def NEGUIMM32 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(uint32_t(-N->getZExtValue()), SDLoc(N),
+                                   MVT::i64);
+}]>;
+
 // Truncate an immediate to a 48-bit unsigned quantity.
 def UIMM48 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(uint64_t(N->getZExtValue()) & 0xffffffffffff,
                                    SDLoc(N), MVT::i64);
 }]>;
 
-// Negate and then truncate an immediate to a 32-bit unsigned quantity.
-def NEGIMM32 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(uint32_t(-N->getZExtValue()), SDLoc(N),
-                                   MVT::i64);
-}]>;
-
 //===----------------------------------------------------------------------===//
 // Immediate asm operands.
 //===----------------------------------------------------------------------===//
@@ -336,6 +348,10 @@ def imm32sx16 : Immediate<i32, [{
   return isInt<16>(N->getSExtValue());
 }], SIMM16, "S16Imm">;
 
+def imm32sx16n : Immediate<i32, [{
+  return isInt<16>(-N->getSExtValue());
+}], NEGSIMM16, "S16Imm">;
+
 def imm32zx16 : Immediate<i32, [{
   return isUInt<16>(N->getZExtValue());
 }], UIMM16, "U16Imm">;
@@ -348,6 +364,10 @@ def imm32sx16trunc : Immediate<i32, [{}]
 def simm32 : Immediate<i32, [{}], SIMM32, "S32Imm">;
 def uimm32 : Immediate<i32, [{}], UIMM32, "U32Imm">;
 
+def simm32n : Immediate<i32, [{
+  return isInt<32>(-N->getSExtValue());
+}], NEGSIMM32, "S32Imm">;
+
 def imm32 : ImmLeaf<i32, [{}]>;
 
 //===----------------------------------------------------------------------===//
@@ -423,6 +443,10 @@ def imm64sx16 : Immediate<i64, [{
   return isInt<16>(N->getSExtValue());
 }], SIMM16, "S16Imm">;
 
+def imm64sx16n : Immediate<i64, [{
+  return isInt<16>(-N->getSExtValue());
+}], NEGSIMM16, "S16Imm">;
+
 def imm64zx16 : Immediate<i64, [{
   return isUInt<16>(N->getZExtValue());
 }], UIMM16, "U16Imm">;
@@ -431,13 +455,17 @@ def imm64sx32 : Immediate<i64, [{
   return isInt<32>(N->getSExtValue());
 }], SIMM32, "S32Imm">;
 
+def imm64sx32n : Immediate<i64, [{
+  return isInt<32>(-N->getSExtValue());
+}], NEGSIMM32, "S32Imm">;
+
 def imm64zx32 : Immediate<i64, [{
   return isUInt<32>(N->getZExtValue());
 }], UIMM32, "U32Imm">;
 
 def imm64zx32n : Immediate<i64, [{
   return isUInt<32>(-N->getSExtValue());
-}], NEGIMM32, "U32Imm">;
+}], NEGUIMM32, "U32Imm">;
 
 def imm64zx48 : Immediate<i64, [{
   return isUInt<64>(N->getZExtValue());

Modified: llvm/trunk/lib/Target/SystemZ/SystemZOperators.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZOperators.td?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZOperators.td (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZOperators.td Mon Apr 30 10:54:28 2018
@@ -45,6 +45,17 @@ def SDT_ZGR128Binary        : SDTypeProf
                                             [SDTCisVT<0, untyped>,
                                              SDTCisInt<1>,
                                              SDTCisInt<2>]>;
+def SDT_ZBinaryWithFlags    : SDTypeProfile<2, 2,
+                                            [SDTCisInt<0>,
+                                             SDTCisVT<1, i32>,
+                                             SDTCisSameAs<0, 2>,
+                                             SDTCisSameAs<0, 3>]>;
+// Two results (value, CC), three operands (lhs, rhs, carry-in CC).
+// Note: the final constraint pins operand 4 (the carry-in) to i32;
+// constraining operand 1 twice would leave the carry-in untyped.
+def SDT_ZBinaryWithCarry    : SDTypeProfile<2, 3,
+                                            [SDTCisInt<0>,
+                                             SDTCisVT<1, i32>,
+                                             SDTCisSameAs<0, 2>,
+                                             SDTCisSameAs<0, 3>,
+                                             SDTCisVT<4, i32>]>;
 def SDT_ZAtomicLoadBinaryW  : SDTypeProfile<1, 5,
                                             [SDTCisVT<0, i32>,
                                              SDTCisPtrTy<1>,
@@ -262,6 +273,12 @@ def z_smul_lohi         : SDNode<"System
 def z_umul_lohi         : SDNode<"SystemZISD::UMUL_LOHI", SDT_ZGR128Binary>;
 def z_sdivrem           : SDNode<"SystemZISD::SDIVREM", SDT_ZGR128Binary>;
 def z_udivrem           : SDNode<"SystemZISD::UDIVREM", SDT_ZGR128Binary>;
+def z_saddo             : SDNode<"SystemZISD::SADDO", SDT_ZBinaryWithFlags>;
+def z_ssubo             : SDNode<"SystemZISD::SSUBO", SDT_ZBinaryWithFlags>;
+def z_uaddo             : SDNode<"SystemZISD::UADDO", SDT_ZBinaryWithFlags>;
+def z_usubo             : SDNode<"SystemZISD::USUBO", SDT_ZBinaryWithFlags>;
+def z_addcarry_1        : SDNode<"SystemZISD::ADDCARRY", SDT_ZBinaryWithCarry>;
+def z_subcarry_1        : SDNode<"SystemZISD::SUBCARRY", SDT_ZBinaryWithCarry>;
 
 def z_membarrier        : SDNode<"SystemZISD::MEMBARRIER", SDTNone,
                                  [SDNPHasChain, SDNPSideEffect]>;
@@ -432,6 +449,10 @@ def z_select_ccmask
             (z_select_ccmask_1 node:$true, node:$false,
                                node:$valid, node:$mask, CC)>;
 def z_ipm : PatFrag<(ops), (z_ipm_1 CC)>;
+def z_addcarry : PatFrag<(ops node:$lhs, node:$rhs),
+                              (z_addcarry_1 node:$lhs, node:$rhs, CC)>;
+def z_subcarry : PatFrag<(ops node:$lhs, node:$rhs),
+                              (z_subcarry_1 node:$lhs, node:$rhs, CC)>;
 
 // Signed and unsigned comparisons.
 def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{

Modified: llvm/trunk/test/CodeGen/SystemZ/asm-18.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/asm-18.ll?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/asm-18.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/asm-18.ll Mon Apr 30 10:54:28 2018
@@ -748,3 +748,78 @@ define void @f34(i32 *%ptr1, i32 *%ptr2)
   store i32 %sel2, i32 *%ptr1
   ret void
 }
+
+; Test immediate addition with overflow involving high registers.
+define void @f35() {
+; CHECK-LABEL: f35:
+; CHECK: stepa [[REG:%r[0-5]]]
+; CHECK: aih [[REG]], -32768
+; CHECK: ipm [[REGCC:%r[0-5]]]
+; CHECK: afi [[REGCC]], 1342177280
+; CHECK: srl [[REGCC]], 31
+; CHECK: stepb [[REG]], [[REGCC]]
+; CHECK: aih [[REG]], 1
+; CHECK: ipm [[REGCC:%r[0-5]]]
+; CHECK: afi [[REGCC]], 1342177280
+; CHECK: srl [[REGCC]], 31
+; CHECK: stepc [[REG]], [[REGCC]]
+; CHECK: aih [[REG]], 32767
+; CHECK: ipm [[REGCC:%r[0-5]]]
+; CHECK: afi [[REGCC]], 1342177280
+; CHECK: srl [[REGCC]], 31
+; CHECK: stepd [[REG]], [[REGCC]]
+; CHECK: br %r14
+  %res1 = call i32 asm "stepa $0", "=h"()
+  %t1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %res1, i32 -32768)
+  %val1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res2 = call i32 asm "stepb $0, $2", "=h,h,d"(i32 %val1, i1 %obit1)
+  %t2 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %res2, i32 1)
+  %val2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res3 = call i32 asm "stepc $0, $2", "=h,h,d"(i32 %val2, i1 %obit2)
+  %t3 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %res3, i32 32767)
+  %val3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  call void asm sideeffect "stepd $0, $1", "h,d"(i32 %val3, i1 %obit3)
+  ret void
+}
+
+; Test large immediate addition with overflow involving high registers.
+define void @f36() {
+; CHECK-LABEL: f36:
+; CHECK: stepa [[REG:%r[0-5]]]
+; CHECK: aih [[REG]], -2147483648
+; CHECK: ipm [[REGCC:%r[0-5]]]
+; CHECK: afi [[REGCC]], 1342177280
+; CHECK: srl [[REGCC]], 31
+; CHECK: stepb [[REG]], [[REGCC]]
+; CHECK: aih [[REG]], 1
+; CHECK: ipm [[REGCC:%r[0-5]]]
+; CHECK: afi [[REGCC]], 1342177280
+; CHECK: srl [[REGCC]], 31
+; CHECK: stepc [[REG]], [[REGCC]]
+; CHECK: aih [[REG]], 2147483647
+; CHECK: ipm [[REGCC:%r[0-5]]]
+; CHECK: afi [[REGCC]], 1342177280
+; CHECK: srl [[REGCC]], 31
+; CHECK: stepd [[REG]], [[REGCC]]
+; CHECK: br %r14
+  %res1 = call i32 asm "stepa $0", "=h"()
+  %t1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %res1, i32 -2147483648)
+  %val1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res2 = call i32 asm "stepb $0, $2", "=h,h,d"(i32 %val1, i1 %obit1)
+  %t2 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %res2, i32 1)
+  %val2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res3 = call i32 asm "stepc $0, $2", "=h,h,d"(i32 %val2, i1 %obit2)
+  %t3 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %res3, i32 2147483647)
+  %val3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  call void asm sideeffect "stepd $0, $1", "h,d"(i32 %val3, i1 %obit3)
+  ret void
+}
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+

Modified: llvm/trunk/test/CodeGen/SystemZ/int-add-08.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-add-08.ll?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-add-08.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/int-add-08.ll Mon Apr 30 10:54:28 2018
@@ -123,12 +123,14 @@ define void @f8(i128 *%ptr0) {
   %ptr2 = getelementptr i128, i128 *%ptr0, i128 4
   %ptr3 = getelementptr i128, i128 *%ptr0, i128 6
   %ptr4 = getelementptr i128, i128 *%ptr0, i128 8
+  %ptr5 = getelementptr i128, i128 *%ptr0, i128 10
 
   %val0 = load i128 , i128 *%ptr0
   %val1 = load i128 , i128 *%ptr1
   %val2 = load i128 , i128 *%ptr2
   %val3 = load i128 , i128 *%ptr3
   %val4 = load i128 , i128 *%ptr4
+  %val5 = load i128 , i128 *%ptr5
 
   %retptr = call i128 *@foo()
 
@@ -138,7 +140,8 @@ define void @f8(i128 *%ptr0) {
   %add2 = add i128 %add1, %val2
   %add3 = add i128 %add2, %val3
   %add4 = add i128 %add3, %val4
-  store i128 %add4, i128 *%retptr
+  %add5 = add i128 %add4, %val5
+  store i128 %add5, i128 *%retptr
 
   ret void
 }

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-01.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-01.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-01.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-01.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,325 @@
+; Test 32-bit addition in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check AR.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: ar %r3, %r4
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: ar %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgo foo at PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: ar %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgno foo at PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check the low end of the A range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: a %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned A range.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: a %r3, 4092(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1023
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which should use AY instead of A.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: ay %r3, 4096(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1024
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AY range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: ay %r3, 524284(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: a %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned AY range.
+define zeroext i1 @f9(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: ay %r3, -4(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AY range.
+define zeroext i1 @f10(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: ay %r3, -524288(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f11(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: agfi %r4, -524292
+; CHECK: a %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that A allows an index.
+define zeroext i1 @f12(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: a %r4, 4092({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4092
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that AY allows an index.
+define zeroext i1 @f13(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f13:
+; CHECK: ay %r4, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4096
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that additions of spilled values can use A rather than AR.
+define zeroext i1 @f14(i32 *%ptr0) {
+; CHECK-LABEL: f14:
+; CHECK: brasl %r14, foo at PLT
+; CHECK: a %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32, i32 *%ptr0
+  %val1 = load i32, i32 *%ptr1
+  %val2 = load i32, i32 *%ptr2
+  %val3 = load i32, i32 *%ptr3
+  %val4 = load i32, i32 *%ptr4
+  %val5 = load i32, i32 *%ptr5
+  %val6 = load i32, i32 *%ptr6
+  %val7 = load i32, i32 *%ptr7
+  %val8 = load i32, i32 *%ptr8
+  %val9 = load i32, i32 *%ptr9
+
+  %ret = call i32 @foo()
+
+  %t0 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %ret, i32 %val0)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add0, i32 %val1)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add1, i32 %val2)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add2, i32 %val3)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add3, i32 %val4)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add4, i32 %val5)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add5, i32 %val6)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add6, i32 %val7)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add7, i32 %val8)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %add8, i32 %val9)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-02.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-02.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-02.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-02.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,253 @@
+; Test 32-bit addition in which the second operand is a sign-extended
+; i16 memory value.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check the low end of the AH range.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: ah %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AH range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: ah %r3, 4094(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 2047
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword up, which should use AHY instead of AH.
+define zeroext i1 @f3(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: ahy %r3, 4096(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 2048
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AHY range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: ahy %r3, 524286(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262143
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: agfi %r4, 524288
+; CHECK: ah %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned AHY range.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: ahy %r3, -2(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -1
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AHY range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: ahy %r3, -524288(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, -524290
+; CHECK: ah %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262145
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that AH allows an index.
+define zeroext i1 @f9(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: ah %r4, 4094({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4094
+  %ptr = inttoptr i64 %add2 to i16 *
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that AHY allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: ahy %r4, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4096
+  %ptr = inttoptr i64 %add2 to i16 *
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f11(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: ah %r3, 0(%r4)
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f12(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: ah %r3, 0(%r4)
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-03.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-03.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-03.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-03.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,269 @@
+; Test 64-bit addition in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check AGR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: agr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: agr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: agr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check AG with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: ag %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i64, i64 *%src
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AG range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: ag %r3, 524280(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65535
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: ag %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned AG range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: ag %r3, -8(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -1
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AG range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: ag %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524296
+; CHECK: ag %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65537
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that AG allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: ag %r4, 524280({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524280
+  %ptr = inttoptr i64 %add2 to i64 *
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that additions of spilled values can use AG rather than AGR.
+define zeroext i1 @f11(i64 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: ag %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
+
+  %val0 = load i64, i64 *%ptr0
+  %val1 = load i64, i64 *%ptr1
+  %val2 = load i64, i64 *%ptr2
+  %val3 = load i64, i64 *%ptr3
+  %val4 = load i64, i64 *%ptr4
+  %val5 = load i64, i64 *%ptr5
+  %val6 = load i64, i64 *%ptr6
+  %val7 = load i64, i64 *%ptr7
+  %val8 = load i64, i64 *%ptr8
+  %val9 = load i64, i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %t0 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %ret, i64 %val0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add0, i64 %val1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add1, i64 %val2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add2, i64 %val3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add3, i64 %val4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add4, i64 %val5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add5, i64 %val6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add6, i64 %val7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add7, i64 %val8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add8, i64 %val9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-04.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-04.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-04.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-04.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,312 @@
+; Test additions between an i64 and a sign-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check AGFR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: agfr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: agfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: agfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check AGF with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: agf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AGF range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: agf %r3, 524284(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: agf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned AGF range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: agf %r3, -4(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AGF range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agf %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524292
+; CHECK: agf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that AGF allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: agf %r4, 524284({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524284
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that additions of spilled values can use AGF rather than AGFR.
+define zeroext i1 @f11(i32 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: agf %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32 , i32 *%ptr0
+  %val1 = load i32 , i32 *%ptr1
+  %val2 = load i32 , i32 *%ptr2
+  %val3 = load i32 , i32 *%ptr3
+  %val4 = load i32 , i32 *%ptr4
+  %val5 = load i32 , i32 *%ptr5
+  %val6 = load i32 , i32 *%ptr6
+  %val7 = load i32 , i32 *%ptr7
+  %val8 = load i32 , i32 *%ptr8
+  %val9 = load i32 , i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = sext i32 %frob0 to i64
+  %ext1 = sext i32 %frob1 to i64
+  %ext2 = sext i32 %frob2 to i64
+  %ext3 = sext i32 %frob3 to i64
+  %ext4 = sext i32 %frob4 to i64
+  %ext5 = sext i32 %frob5 to i64
+  %ext6 = sext i32 %frob6 to i64
+  %ext7 = sext i32 %frob7 to i64
+  %ext8 = sext i32 %frob8 to i64
+  %ext9 = sext i32 %frob9 to i64
+
+  %t0 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %ret, i64 %ext0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add0, i64 %ext1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add1, i64 %ext2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add2, i64 %ext3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add3, i64 %ext4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add4, i64 %ext5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add5, i64 %ext6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add6, i64 %ext7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add7, i64 %ext8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %add8, i64 %ext9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-05.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-05.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-05.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-05.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,186 @@
+; Test additions between an i64 and a sign-extended i16 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check AGH with no displacement.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: agh %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AGH range.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: agh %r3, 524286(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262143
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: agfi %r4, 524288
+; CHECK: agh %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned AGH range.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agh %r3, -2(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -1
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AGH range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: agh %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, -524290
+; CHECK: agh %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262145
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that AGH allows an index.
+define zeroext i1 @f9(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agh %r4, 524284({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524284
+  %ptr = inttoptr i64 %add2 to i16 *
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f11(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: agh %r3, 0(%r4)
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f12(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: agh %r3, 0(%r4)
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-06.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-06.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-06.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-06.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,212 @@
+; Test 32-bit addition in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo()
+
+; Check additions of 1.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: ahi %r3, 1
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the AHI range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: ahi %r3, 32767
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 32767)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use AFI instead.
+define zeroext i1 @f3(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: afi %r3, 32768
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the signed 32-bit range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: afi %r3, 2147483647
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 2147483647)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which is treated as a negative value.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: afi %r3, -2147483648
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 2147483648)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative AHI range.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: ahi %r3, -1
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 -1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AHI range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: ahi %r3, -32768
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 -32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use AFI instead.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: afi %r3, -32769
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 -32769)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the signed 32-bit range.
+define zeroext i1 @f9(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: afi %r3, -2147483648
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 -2147483648)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which is treated as a positive value.
+define zeroext i1 @f10(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: afi %r3, 2147483647
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 -2147483649)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f11(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: ahi %r3, 1
+; CHECK: st %r3, 0(%r4)
+; CHECK: {{jgo foo@PLT|bnor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f12(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: ahi %r3, 1
+; CHECK: st %r3, 0(%r4)
+; CHECK: {{jgno foo@PLT|bor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-07.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-07.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-07.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-07.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,214 @@
+; Test 64-bit addition in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo()
+
+; Check additions of 1.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: aghi %r3, 1
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+
+}
+
+; Check the high end of the AGHI range.
+define zeroext i1 @f2(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: aghi %r3, 32767
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 32767)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use AGFI instead.
+define zeroext i1 @f3(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, 32768
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the AGFI range.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, 2147483647
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 2147483647)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must be loaded into a register first.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: llilh [[REG1:%r[0-9]+]], 32768
+; CHECK: agr [[REG1]], %r3
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 2147483648)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative AGHI range.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: aghi %r3, -1
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 -1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AGHI range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: aghi %r3, -32768
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 -32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use AGFI instead.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r3, -32769
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 -32769)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AGFI range.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r3, -2147483648
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 -2147483648)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use register addition instead.
+define zeroext i1 @f10(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: llihf [[REG1:%r[0-9]+]], 4294967295
+; CHECK: agr [[REG1]], %r3
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 -2147483649)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f11(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: aghi %r3, 1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: {{jgo foo@PLT|bnor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f12(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: aghi %r3, 1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: {{jgno foo@PLT|bor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-08.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-08.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-08.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-08.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,490 @@
+; Test 32-bit additions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check addition of 1.
+define zeroext i1 @f1(i32 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: asi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i32 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: asi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 127)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use an addition and a store.
+define zeroext i1 @f3(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: ahi [[VAL]], 128
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i32 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: asi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 -128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: ahi [[VAL]], -129
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 -129)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ASI range.
+define zeroext i1 @f6(i32 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: asi 524284(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131071
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i32 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: asi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the ASI range.
+define zeroext i1 @f8(i32 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: asi -524288(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i32 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524292
+; CHECK: asi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131073
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that ASI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: asi 4(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 4
+  %ptr = inttoptr i64 %add2 to i32 *
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that adding 127 to a spilled value can use ASI.
+define zeroext i1 @f11(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: asi {{[0-9]+}}(%r15), 127
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val0, i32 127)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val1, i32 127)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val2, i32 127)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val3, i32 127)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val4, i32 127)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val5, i32 127)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val6, i32 127)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val7, i32 127)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val8, i32 127)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val9, i32 127)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val10, i32 127)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val11, i32 127)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val12, i32 127)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val13, i32 127)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val14, i32 127)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val15, i32 127)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check that adding -128 to a spilled value can use ASI.
+define zeroext i1 @f12(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f12:
+; CHECK: asi {{[0-9]+}}(%r15), -128
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val0, i32 -128)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val1, i32 -128)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val2, i32 -128)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val3, i32 -128)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val4, i32 -128)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val5, i32 -128)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val6, i32 -128)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val7, i32 -128)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val8, i32 -128)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val9, i32 -128)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val10, i32 -128)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val11, i32 -128)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val12, i32 -128)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val13, i32 -128)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val14, i32 -128)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %val15, i32 -128)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f13(i32 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: asi 0(%r2), 1
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f14(i32 *%ptr) {
+; CHECK-LABEL: f14:
+; CHECK: asi 0(%r2), 1
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-sadd-09.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sadd-09.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sadd-09.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sadd-09.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,490 @@
+; Test 64-bit additions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check addition of 1.
+define zeroext i1 @f1(i64 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: agsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i64 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: agsi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 127)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use an addition and a store.
+define zeroext i1 @f3(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: lg [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: aghi [[VAL]], 128
+; CHECK-DAG: stg [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i64 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: agsi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 -128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: lg [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: aghi [[VAL]], -129
+; CHECK-DAG: stg [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 -129)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AGSI range.
+define zeroext i1 @f6(i64 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: agsi 524280(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65535
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i64 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: agsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the AGSI range.
+define zeroext i1 @f8(i64 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: agsi -524288(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524296
+; CHECK: agsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65537
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that AGSI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: agsi 4(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 4
+  %ptr = inttoptr i64 %add2 to i64 *
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that adding 127 to a spilled value can use AGSI.
+define zeroext i1 @f11(i64 *%ptr, i64 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: agsi {{[0-9]+}}(%r15), 127
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i64, i64 *%ptr
+  %val1 = load volatile i64, i64 *%ptr
+  %val2 = load volatile i64, i64 *%ptr
+  %val3 = load volatile i64, i64 *%ptr
+  %val4 = load volatile i64, i64 *%ptr
+  %val5 = load volatile i64, i64 *%ptr
+  %val6 = load volatile i64, i64 *%ptr
+  %val7 = load volatile i64, i64 *%ptr
+  %val8 = load volatile i64, i64 *%ptr
+  %val9 = load volatile i64, i64 *%ptr
+  %val10 = load volatile i64, i64 *%ptr
+  %val11 = load volatile i64, i64 *%ptr
+  %val12 = load volatile i64, i64 *%ptr
+  %val13 = load volatile i64, i64 *%ptr
+  %val14 = load volatile i64, i64 *%ptr
+  %val15 = load volatile i64, i64 *%ptr
+
+  %test = icmp ne i64 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val0, i64 127)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val1, i64 127)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val2, i64 127)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val3, i64 127)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val4, i64 127)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val5, i64 127)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val6, i64 127)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val7, i64 127)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val8, i64 127)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val9, i64 127)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val10, i64 127)
+  %add10 = extractvalue {i64, i1} %t10, 0
+  %obit10 = extractvalue {i64, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val11, i64 127)
+  %add11 = extractvalue {i64, i1} %t11, 0
+  %obit11 = extractvalue {i64, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val12, i64 127)
+  %add12 = extractvalue {i64, i1} %t12, 0
+  %obit12 = extractvalue {i64, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val13, i64 127)
+  %add13 = extractvalue {i64, i1} %t13, 0
+  %obit13 = extractvalue {i64, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val14, i64 127)
+  %add14 = extractvalue {i64, i1} %t14, 0
+  %obit14 = extractvalue {i64, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val15, i64 127)
+  %add15 = extractvalue {i64, i1} %t15, 0
+  %obit15 = extractvalue {i64, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i64 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i64 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i64 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i64 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i64 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i64 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i64 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i64 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i64 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i64 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i64 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i64 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i64 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i64 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i64 %new0, i64 *%ptr
+  store volatile i64 %new1, i64 *%ptr
+  store volatile i64 %new2, i64 *%ptr
+  store volatile i64 %new3, i64 *%ptr
+  store volatile i64 %new4, i64 *%ptr
+  store volatile i64 %new5, i64 *%ptr
+  store volatile i64 %new6, i64 *%ptr
+  store volatile i64 %new7, i64 *%ptr
+  store volatile i64 %new8, i64 *%ptr
+  store volatile i64 %new9, i64 *%ptr
+  store volatile i64 %new10, i64 *%ptr
+  store volatile i64 %new11, i64 *%ptr
+  store volatile i64 %new12, i64 *%ptr
+  store volatile i64 %new13, i64 *%ptr
+  store volatile i64 %new14, i64 *%ptr
+  store volatile i64 %new15, i64 *%ptr
+
+  ret i1 %res
+}
+
+; Check that adding -128 to a spilled value can use AGSI.
+define zeroext i1 @f12(i64 *%ptr, i64 %sel) {
+; CHECK-LABEL: f12:
+; CHECK: agsi {{[0-9]+}}(%r15), -128
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i64, i64 *%ptr
+  %val1 = load volatile i64, i64 *%ptr
+  %val2 = load volatile i64, i64 *%ptr
+  %val3 = load volatile i64, i64 *%ptr
+  %val4 = load volatile i64, i64 *%ptr
+  %val5 = load volatile i64, i64 *%ptr
+  %val6 = load volatile i64, i64 *%ptr
+  %val7 = load volatile i64, i64 *%ptr
+  %val8 = load volatile i64, i64 *%ptr
+  %val9 = load volatile i64, i64 *%ptr
+  %val10 = load volatile i64, i64 *%ptr
+  %val11 = load volatile i64, i64 *%ptr
+  %val12 = load volatile i64, i64 *%ptr
+  %val13 = load volatile i64, i64 *%ptr
+  %val14 = load volatile i64, i64 *%ptr
+  %val15 = load volatile i64, i64 *%ptr
+
+  %test = icmp ne i64 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val0, i64 -128)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val1, i64 -128)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val2, i64 -128)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val3, i64 -128)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val4, i64 -128)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val5, i64 -128)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val6, i64 -128)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val7, i64 -128)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val8, i64 -128)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val9, i64 -128)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val10, i64 -128)
+  %add10 = extractvalue {i64, i1} %t10, 0
+  %obit10 = extractvalue {i64, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val11, i64 -128)
+  %add11 = extractvalue {i64, i1} %t11, 0
+  %obit11 = extractvalue {i64, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val12, i64 -128)
+  %add12 = extractvalue {i64, i1} %t12, 0
+  %obit12 = extractvalue {i64, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val13, i64 -128)
+  %add13 = extractvalue {i64, i1} %t13, 0
+  %obit13 = extractvalue {i64, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val14, i64 -128)
+  %add14 = extractvalue {i64, i1} %t14, 0
+  %obit14 = extractvalue {i64, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %val15, i64 -128)
+  %add15 = extractvalue {i64, i1} %t15, 0
+  %obit15 = extractvalue {i64, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i64 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i64 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i64 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i64 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i64 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i64 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i64 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i64 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i64 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i64 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i64 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i64 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i64 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i64 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i64 %new0, i64 *%ptr
+  store volatile i64 %new1, i64 *%ptr
+  store volatile i64 %new2, i64 *%ptr
+  store volatile i64 %new3, i64 *%ptr
+  store volatile i64 %new4, i64 *%ptr
+  store volatile i64 %new5, i64 *%ptr
+  store volatile i64 %new6, i64 *%ptr
+  store volatile i64 %new7, i64 *%ptr
+  store volatile i64 %new8, i64 *%ptr
+  store volatile i64 %new9, i64 *%ptr
+  store volatile i64 %new10, i64 *%ptr
+  store volatile i64 %new11, i64 *%ptr
+  store volatile i64 %new12, i64 *%ptr
+  store volatile i64 %new13, i64 *%ptr
+  store volatile i64 %new14, i64 *%ptr
+  store volatile i64 %new15, i64 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f13(i64 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: agsi 0(%r2), 1
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f14(i64 *%ptr) {
+; CHECK-LABEL: f14:
+; CHECK: agsi 0(%r2), 1
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-01.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-01.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-01.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-01.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,325 @@
+; Test 32-bit subtraction in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check SR.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: sr %r3, %r4
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: sr %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: sr %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check the low end of the S range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: s %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned S range.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: s %r3, 4092(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1023
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which should use SY instead of S.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: sy %r3, 4096(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1024
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SY range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: sy %r3, 524284(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: s %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SY range.
+define zeroext i1 @f9(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: sy %r3, -4(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SY range.
+define zeroext i1 @f10(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: sy %r3, -524288(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f11(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: agfi %r4, -524292
+; CHECK: s %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that S allows an index.
+define zeroext i1 @f12(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: s %r4, 4092({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4092
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that SY allows an index.
+define zeroext i1 @f13(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f13:
+; CHECK: sy %r4, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4096
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that subtractions of spilled values can use S rather than SR.
+define zeroext i1 @f14(i32 *%ptr0) {
+; CHECK-LABEL: f14:
+; CHECK: brasl %r14, foo at PLT
+; CHECK: s %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32, i32 *%ptr0
+  %val1 = load i32, i32 *%ptr1
+  %val2 = load i32, i32 *%ptr2
+  %val3 = load i32, i32 *%ptr3
+  %val4 = load i32, i32 *%ptr4
+  %val5 = load i32, i32 *%ptr5
+  %val6 = load i32, i32 *%ptr6
+  %val7 = load i32, i32 *%ptr7
+  %val8 = load i32, i32 *%ptr8
+  %val9 = load i32, i32 *%ptr9
+
+  %ret = call i32 @foo()
+
+  %t0 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %ret, i32 %val0)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add0, i32 %val1)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add1, i32 %val2)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add2, i32 %val3)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add3, i32 %val4)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add4, i32 %val5)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add5, i32 %val6)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add6, i32 %val7)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add7, i32 %val8)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %add8, i32 %val9)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-02.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-02.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-02.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-02.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,253 @@
+; Test 32-bit subtraction in which the second operand is a sign-extended
+; i16 memory value.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check the low end of the SH range.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: sh %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SH range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: sh %r3, 4094(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 2047
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword up, which should use SHY instead of SH.
+define zeroext i1 @f3(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: shy %r3, 4096(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 2048
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SHY range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: shy %r3, 524286(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262143
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: agfi %r4, 524288
+; CHECK: sh %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SHY range.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: shy %r3, -2(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -1
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SHY range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: shy %r3, -524288(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, -524290
+; CHECK: sh %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262145
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that SH allows an index.
+define zeroext i1 @f9(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: sh %r4, 4094({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4094
+  %ptr = inttoptr i64 %add2 to i16 *
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that SHY allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: shy %r4, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4096
+  %ptr = inttoptr i64 %add2 to i16 *
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f11(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: sh %r3, 0(%r4)
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f12(i32 %dummy, i32 %a, i16 *%src, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: sh %r3, 0(%r4)
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i32
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-03.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-03.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-03.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-03.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,269 @@
+; Test 64-bit subtraction in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check SGR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: sgr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: sgr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: sgr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check SG with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: sg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i64, i64 *%src
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SG range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: sg %r3, 524280(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65535
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: sg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SG range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: sg %r3, -8(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -1
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SG range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: sg %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524296
+; CHECK: sg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65537
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that SG allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: sg %r4, 524280({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524280
+  %ptr = inttoptr i64 %add2 to i64 *
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that subtractions of spilled values can use SG rather than SGR.
+define zeroext i1 @f11(i64 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: sg %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
+
+  %val0 = load i64, i64 *%ptr0
+  %val1 = load i64, i64 *%ptr1
+  %val2 = load i64, i64 *%ptr2
+  %val3 = load i64, i64 *%ptr3
+  %val4 = load i64, i64 *%ptr4
+  %val5 = load i64, i64 *%ptr5
+  %val6 = load i64, i64 *%ptr6
+  %val7 = load i64, i64 *%ptr7
+  %val8 = load i64, i64 *%ptr8
+  %val9 = load i64, i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %t0 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %ret, i64 %val0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add0, i64 %val1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add1, i64 %val2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add2, i64 %val3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add3, i64 %val4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add4, i64 %val5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add5, i64 %val6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add6, i64 %val7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add7, i64 %val8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add8, i64 %val9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-04.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-04.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-04.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-04.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,312 @@
+; Test subtractions between an i64 and a sign-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check SGFR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: sgfr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: sgfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: sgfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check SGF with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: sgf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SGF range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: sgf %r3, 524284(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: sgf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SGF range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: sgf %r3, -4(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SGF range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: sgf %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524292
+; CHECK: sgf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that SGF allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: sgf %r4, 524284({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524284
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %bext = sext i32 %b to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that subtractions of spilled values can use SGF rather than SGFR.
+define zeroext i1 @f11(i32 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: sgf %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32 , i32 *%ptr0
+  %val1 = load i32 , i32 *%ptr1
+  %val2 = load i32 , i32 *%ptr2
+  %val3 = load i32 , i32 *%ptr3
+  %val4 = load i32 , i32 *%ptr4
+  %val5 = load i32 , i32 *%ptr5
+  %val6 = load i32 , i32 *%ptr6
+  %val7 = load i32 , i32 *%ptr7
+  %val8 = load i32 , i32 *%ptr8
+  %val9 = load i32 , i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = sext i32 %frob0 to i64
+  %ext1 = sext i32 %frob1 to i64
+  %ext2 = sext i32 %frob2 to i64
+  %ext3 = sext i32 %frob3 to i64
+  %ext4 = sext i32 %frob4 to i64
+  %ext5 = sext i32 %frob5 to i64
+  %ext6 = sext i32 %frob6 to i64
+  %ext7 = sext i32 %frob7 to i64
+  %ext8 = sext i32 %frob8 to i64
+  %ext9 = sext i32 %frob9 to i64
+
+  %t0 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %ret, i64 %ext0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add0, i64 %ext1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add1, i64 %ext2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add2, i64 %ext3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add3, i64 %ext4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add4, i64 %ext5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add5, i64 %ext6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add6, i64 %ext7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add7, i64 %ext8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %add8, i64 %ext9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-05.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-05.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-05.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-05.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,186 @@
+; Test subtractions between an i64 and a sign-extended i16 on z14.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+
+declare i64 @foo()
+
+; Check SGH with no displacement.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: sgh %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SGH range.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: sgh %r3, 524286(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262143
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: agfi %r4, 524288
+; CHECK: sgh %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SGH range.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: sgh %r3, -2(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -1
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SGH range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: sgh %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262144
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, -524290
+; CHECK: sgh %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %ptr = getelementptr i16, i16 *%src, i64 -262145
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that SGH allows an index.
+define zeroext i1 @f9(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: sgh %r4, 524284({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: lghi %r2, 0
+; CHECK-DAG: locghio %r2, 1
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524284
+  %ptr = inttoptr i64 %add2 to i16 *
+  %half = load i16, i16 *%ptr
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f11(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: sgh %r3, 0(%r4)
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f12(i64 %dummy, i64 %a, i16 *%src, i64 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: sgh %r3, 0(%r4)
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %half = load i16, i16 *%src
+  %b = sext i16 %half to i64
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-06.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-06.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-06.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-06.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,248 @@
+; Test 32-bit subtraction in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo()
+
+; Check subtractions of 1.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: ahi %r3, -1
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the AHI range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: ahi %r3, -32768
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use AFI instead.
+define zeroext i1 @f3(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: afi %r3, -32769
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 32769)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the signed 32-bit range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: afi %r3, -2147483647
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 2147483647)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which is treated as a negative value
+; and must use a register.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: llilh [[REG1:%r[0-5]]], 32768
+; CHECK: sr %r3, [[REG1]]
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 2147483648)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which is treated as a negative value,
+; and can use AFI again.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: afi %r3, 2147483647
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 2147483649)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative AHI range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: ahi %r3, 1
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the AHI range.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: ahi %r3, 32767
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -32767)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use AFI instead.
+define zeroext i1 @f9(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: afi %r3, 32768
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the signed 32-bit range.
+define zeroext i1 @f10(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: afi %r3, 2147483647
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -2147483647)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use a register.
+define zeroext i1 @f11(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: llilh [[REG1:%r[0-5]]], 32768
+; CHECK: sr %r3, [[REG1]]
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -2147483648)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which is treated as a positive value.
+define zeroext i1 @f12(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: afi %r3, -2147483647
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -2147483649)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f13(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f13:
+; CHECK: ahi %r3, -1
+; CHECK: st %r3, 0(%r4)
+; CHECK: {{jgo foo@PLT|bnor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f14(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f14:
+; CHECK: ahi %r3, -1
+; CHECK: st %r3, 0(%r4)
+; CHECK: {{jgno foo@PLT|bor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-07.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-07.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-07.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-07.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,214 @@
+; Test 64-bit subtraction in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo()
+
+; Check subtractions of 1.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: aghi %r3, -1
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+
+}
+
+; Check the high end of the SGHI range.
+define zeroext i1 @f2(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: aghi %r3, -32768
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use SGFI instead.
+define zeroext i1 @f3(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: agfi %r3, -32769
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 32769)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the SGFI range.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: agfi %r3, -2147483648
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 2147483648)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must be loaded into a register first.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: llilf [[REG1:%r[0-9]+]], 2147483649
+; CHECK: sgr %r3, [[REG1]]
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 2147483649)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative SGHI range.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: aghi %r3, 1
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 -1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SGHI range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: aghi %r3, 32767
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 -32767)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use SGFI instead.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r3, 32768
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 -32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SGFI range.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r3, 2147483647
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 -2147483647)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use register subtraction instead.
+define zeroext i1 @f10(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: lgfi [[REG1:%r[0-9]+]], -2147483648
+; CHECK: sgr %r3, [[REG1]]
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 -2147483648)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f11(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: aghi %r3, -1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: {{jgo foo@PLT|bnor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f12(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: aghi %r3, -1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: {{jgno foo@PLT|bor %r14}}
+; CHECK: {{br %r14|jg foo@PLT}}
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-08.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-08.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-08.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-08.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,490 @@
+; Test 32-bit subtractions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i32 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: asi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i32 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: asi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use a subtraction and a store.
+define zeroext i1 @f3(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: ahi [[VAL]], -129
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 129)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i32 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: asi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -127)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: ahi [[VAL]], 128
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 -128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ASI range.
+define zeroext i1 @f6(i32 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: asi 524284(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131071
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i32 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: asi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the ASI range.
+define zeroext i1 @f8(i32 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: asi -524288(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i32 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524292
+; CHECK: asi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131073
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that ASI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: asi 4(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 4
+  %ptr = inttoptr i64 %add2 to i32 *
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that subtracting 128 from a spilled value can use ASI.
+define zeroext i1 @f11(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: asi {{[0-9]+}}(%r15), -128
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val0, i32 128)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val1, i32 128)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val2, i32 128)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val3, i32 128)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val4, i32 128)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val5, i32 128)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val6, i32 128)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val7, i32 128)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val8, i32 128)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val9, i32 128)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val10, i32 128)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val11, i32 128)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val12, i32 128)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val13, i32 128)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val14, i32 128)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val15, i32 128)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check that subtracting -127 from a spilled value can use ASI.
+define zeroext i1 @f12(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f12:
+; CHECK: asi {{[0-9]+}}(%r15), 127
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val0, i32 -127)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val1, i32 -127)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val2, i32 -127)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val3, i32 -127)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val4, i32 -127)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val5, i32 -127)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val6, i32 -127)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val7, i32 -127)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val8, i32 -127)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val9, i32 -127)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val10, i32 -127)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val11, i32 -127)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val12, i32 -127)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val13, i32 -127)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val14, i32 -127)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %val15, i32 -127)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f13(i32 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: asi 0(%r2), -1
+; CHECK: jgo foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f14(i32 *%ptr) {
+; CHECK-LABEL: f14:
+; CHECK: asi 0(%r2), -1
+; CHECK: jgno foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-ssub-09.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-ssub-09.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-ssub-09.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-ssub-09.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,490 @@
+; Test 64-bit subtractions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i64 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: agsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i64 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: agsi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use a subtraction and a store.
+define zeroext i1 @f3(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: lg [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: aghi [[VAL]], -129
+; CHECK-DAG: stg [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 129)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i64 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: agsi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 -127)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: lg [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: aghi [[VAL]], 128
+; CHECK-DAG: stg [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], 1342177280
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 -128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AGSI range.
+define zeroext i1 @f6(i64 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: agsi 524280(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65535
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i64 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: agsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the AGSI range.
+define zeroext i1 @f8(i64 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: agsi -524288(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524296
+; CHECK: agsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65537
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that AGSI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: agsi 4(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], 1342177280
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 4
+  %ptr = inttoptr i64 %add2 to i64 *
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that subtracting 128 from a spilled value can use AGSI.
+define zeroext i1 @f11(i64 *%ptr, i64 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: agsi {{[0-9]+}}(%r15), -128
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i64, i64 *%ptr
+  %val1 = load volatile i64, i64 *%ptr
+  %val2 = load volatile i64, i64 *%ptr
+  %val3 = load volatile i64, i64 *%ptr
+  %val4 = load volatile i64, i64 *%ptr
+  %val5 = load volatile i64, i64 *%ptr
+  %val6 = load volatile i64, i64 *%ptr
+  %val7 = load volatile i64, i64 *%ptr
+  %val8 = load volatile i64, i64 *%ptr
+  %val9 = load volatile i64, i64 *%ptr
+  %val10 = load volatile i64, i64 *%ptr
+  %val11 = load volatile i64, i64 *%ptr
+  %val12 = load volatile i64, i64 *%ptr
+  %val13 = load volatile i64, i64 *%ptr
+  %val14 = load volatile i64, i64 *%ptr
+  %val15 = load volatile i64, i64 *%ptr
+
+  %test = icmp ne i64 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val0, i64 128)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val1, i64 128)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val2, i64 128)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val3, i64 128)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val4, i64 128)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val5, i64 128)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val6, i64 128)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val7, i64 128)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val8, i64 128)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val9, i64 128)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val10, i64 128)
+  %add10 = extractvalue {i64, i1} %t10, 0
+  %obit10 = extractvalue {i64, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val11, i64 128)
+  %add11 = extractvalue {i64, i1} %t11, 0
+  %obit11 = extractvalue {i64, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val12, i64 128)
+  %add12 = extractvalue {i64, i1} %t12, 0
+  %obit12 = extractvalue {i64, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val13, i64 128)
+  %add13 = extractvalue {i64, i1} %t13, 0
+  %obit13 = extractvalue {i64, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val14, i64 128)
+  %add14 = extractvalue {i64, i1} %t14, 0
+  %obit14 = extractvalue {i64, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val15, i64 128)
+  %add15 = extractvalue {i64, i1} %t15, 0
+  %obit15 = extractvalue {i64, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i64 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i64 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i64 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i64 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i64 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i64 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i64 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i64 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i64 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i64 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i64 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i64 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i64 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i64 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i64 %new0, i64 *%ptr
+  store volatile i64 %new1, i64 *%ptr
+  store volatile i64 %new2, i64 *%ptr
+  store volatile i64 %new3, i64 *%ptr
+  store volatile i64 %new4, i64 *%ptr
+  store volatile i64 %new5, i64 *%ptr
+  store volatile i64 %new6, i64 *%ptr
+  store volatile i64 %new7, i64 *%ptr
+  store volatile i64 %new8, i64 *%ptr
+  store volatile i64 %new9, i64 *%ptr
+  store volatile i64 %new10, i64 *%ptr
+  store volatile i64 %new11, i64 *%ptr
+  store volatile i64 %new12, i64 *%ptr
+  store volatile i64 %new13, i64 *%ptr
+  store volatile i64 %new14, i64 *%ptr
+  store volatile i64 %new15, i64 *%ptr
+
+  ret i1 %res
+}
+
+; Check that subtracting -127 from a spilled value can use AGSI.
+define zeroext i1 @f12(i64 *%ptr, i64 %sel) {
+; CHECK-LABEL: f12:
+; CHECK: agsi {{[0-9]+}}(%r15), 127
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i64, i64 *%ptr
+  %val1 = load volatile i64, i64 *%ptr
+  %val2 = load volatile i64, i64 *%ptr
+  %val3 = load volatile i64, i64 *%ptr
+  %val4 = load volatile i64, i64 *%ptr
+  %val5 = load volatile i64, i64 *%ptr
+  %val6 = load volatile i64, i64 *%ptr
+  %val7 = load volatile i64, i64 *%ptr
+  %val8 = load volatile i64, i64 *%ptr
+  %val9 = load volatile i64, i64 *%ptr
+  %val10 = load volatile i64, i64 *%ptr
+  %val11 = load volatile i64, i64 *%ptr
+  %val12 = load volatile i64, i64 *%ptr
+  %val13 = load volatile i64, i64 *%ptr
+  %val14 = load volatile i64, i64 *%ptr
+  %val15 = load volatile i64, i64 *%ptr
+
+  %test = icmp ne i64 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val0, i64 -127)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val1, i64 -127)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val2, i64 -127)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val3, i64 -127)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val4, i64 -127)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val5, i64 -127)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val6, i64 -127)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val7, i64 -127)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val8, i64 -127)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val9, i64 -127)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val10, i64 -127)
+  %add10 = extractvalue {i64, i1} %t10, 0
+  %obit10 = extractvalue {i64, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val11, i64 -127)
+  %add11 = extractvalue {i64, i1} %t11, 0
+  %obit11 = extractvalue {i64, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val12, i64 -127)
+  %add12 = extractvalue {i64, i1} %t12, 0
+  %obit12 = extractvalue {i64, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val13, i64 -127)
+  %add13 = extractvalue {i64, i1} %t13, 0
+  %obit13 = extractvalue {i64, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val14, i64 -127)
+  %add14 = extractvalue {i64, i1} %t14, 0
+  %obit14 = extractvalue {i64, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %val15, i64 -127)
+  %add15 = extractvalue {i64, i1} %t15, 0
+  %obit15 = extractvalue {i64, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i64 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i64 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i64 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i64 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i64 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i64 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i64 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i64 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i64 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i64 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i64 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i64 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i64 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i64 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i64 %new0, i64 *%ptr
+  store volatile i64 %new1, i64 *%ptr
+  store volatile i64 %new2, i64 *%ptr
+  store volatile i64 %new3, i64 *%ptr
+  store volatile i64 %new4, i64 *%ptr
+  store volatile i64 %new5, i64 *%ptr
+  store volatile i64 %new6, i64 *%ptr
+  store volatile i64 %new7, i64 *%ptr
+  store volatile i64 %new8, i64 *%ptr
+  store volatile i64 %new9, i64 *%ptr
+  store volatile i64 %new10, i64 *%ptr
+  store volatile i64 %new11, i64 *%ptr
+  store volatile i64 %new12, i64 *%ptr
+  store volatile i64 %new13, i64 *%ptr
+  store volatile i64 %new14, i64 *%ptr
+  store volatile i64 %new15, i64 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f13(i64 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: agsi 0(%r2), -1
+; CHECK: jgo foo at PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f14(i64 *%ptr) {
+; CHECK-LABEL: f14:
+; CHECK: agsi 0(%r2), -1
+; CHECK: jgno foo at PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+

Modified: llvm/trunk/test/CodeGen/SystemZ/int-sub-05.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-sub-05.ll?rev=331203&r1=331202&r2=331203&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-sub-05.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/int-sub-05.ll Mon Apr 30 10:54:28 2018
@@ -132,12 +132,14 @@ define void @f8(i128 *%ptr0) {
   %ptr2 = getelementptr i128, i128 *%ptr0, i128 4
   %ptr3 = getelementptr i128, i128 *%ptr0, i128 6
   %ptr4 = getelementptr i128, i128 *%ptr0, i128 8
+  %ptr5 = getelementptr i128, i128 *%ptr0, i128 10
 
   %val0 = load i128 , i128 *%ptr0
   %val1 = load i128 , i128 *%ptr1
   %val2 = load i128 , i128 *%ptr2
   %val3 = load i128 , i128 *%ptr3
   %val4 = load i128 , i128 *%ptr4
+  %val5 = load i128 , i128 *%ptr5
 
   %retptr = call i128 *@foo()
 
@@ -147,7 +149,8 @@ define void @f8(i128 *%ptr0) {
   %sub2 = sub i128 %sub1, %val2
   %sub3 = sub i128 %sub2, %val3
   %sub4 = sub i128 %sub3, %val4
-  store i128 %sub4, i128 *%retptr
+  %sub5 = sub i128 %sub4, %val5
+  store i128 %sub5, i128 *%retptr
 
   ret void
 }

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-01.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-01.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-01.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-01.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,314 @@
+; Test 32-bit addition in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check ALR.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: alr %r3, %r4
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: alr %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgnle foo at PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: alr %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgle foo at PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check the low end of the AL range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: al %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned AL range.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: al %r3, 4092(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1023
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which should use ALY instead of AL.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: aly %r3, 4096(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1024
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ALY range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: aly %r3, 524284(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: al %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned ALY range.
+define zeroext i1 @f9(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: aly %r3, -4(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the ALY range.
+define zeroext i1 @f10(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: aly %r3, -524288(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f11(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: agfi %r4, -524292
+; CHECK: al %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that AL allows an index.
+define zeroext i1 @f12(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: al %r4, 4092({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4092
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that ALY allows an index.
+define zeroext i1 @f13(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f13:
+; CHECK: aly %r4, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4096
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that additions of spilled values can use AL rather than ALR.
+define zeroext i1 @f14(i32 *%ptr0) {
+; CHECK-LABEL: f14:
+; CHECK: brasl %r14, foo at PLT
+; CHECK: al %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32, i32 *%ptr0
+  %val1 = load i32, i32 *%ptr1
+  %val2 = load i32, i32 *%ptr2
+  %val3 = load i32, i32 *%ptr3
+  %val4 = load i32, i32 *%ptr4
+  %val5 = load i32, i32 *%ptr5
+  %val6 = load i32, i32 *%ptr6
+  %val7 = load i32, i32 *%ptr7
+  %val8 = load i32, i32 *%ptr8
+  %val9 = load i32, i32 *%ptr9
+
+  %ret = call i32 @foo()
+
+  %t0 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %ret, i32 %val0)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add0, i32 %val1)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add1, i32 %val2)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add2, i32 %val3)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add3, i32 %val4)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add4, i32 %val5)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add5, i32 %val6)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add6, i32 %val7)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add7, i32 %val8)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add8, i32 %val9)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-02.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-02.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-02.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-02.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,261 @@
+; Test 64-bit addition in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check ALGR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: algr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: algr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: algr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check ALG with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: alg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %b = load i64, i64 *%src
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ALG range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: alg %r3, 524280(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65535
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: alg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned ALG range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: alg %r3, -8(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -1
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the ALG range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: alg %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524296
+; CHECK: alg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65537
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that ALG allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: alg %r4, 524280({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524280
+  %ptr = inttoptr i64 %add2 to i64 *
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that additions of spilled values can use ALG rather than ALGR.
+define zeroext i1 @f11(i64 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: alg %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
+
+  %val0 = load i64, i64 *%ptr0
+  %val1 = load i64, i64 *%ptr1
+  %val2 = load i64, i64 *%ptr2
+  %val3 = load i64, i64 *%ptr3
+  %val4 = load i64, i64 *%ptr4
+  %val5 = load i64, i64 *%ptr5
+  %val6 = load i64, i64 *%ptr6
+  %val7 = load i64, i64 *%ptr7
+  %val8 = load i64, i64 *%ptr8
+  %val9 = load i64, i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %t0 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %ret, i64 %val0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add0, i64 %val1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add1, i64 %val2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add2, i64 %val3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add3, i64 %val4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add4, i64 %val5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add5, i64 %val6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add6, i64 %val7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add7, i64 %val8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add8, i64 %val9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-03.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-03.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-03.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-03.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,304 @@
+; Test additions between an i64 and a zero-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check ALGFR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: algfr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: algfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: algfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check ALGF with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: algf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ALGF range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: algf %r3, 524284(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: algf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned ALGF range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: algf %r3, -4(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the ALGF range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: algf %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524292
+; CHECK: algf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that ALGF allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: algf %r4, 524284({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524284
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that additions of spilled values can use ALGF rather than ALGFR.
+define zeroext i1 @f11(i32 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: algf %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32, i32 *%ptr0
+  %val1 = load i32, i32 *%ptr1
+  %val2 = load i32, i32 *%ptr2
+  %val3 = load i32, i32 *%ptr3
+  %val4 = load i32, i32 *%ptr4
+  %val5 = load i32, i32 *%ptr5
+  %val6 = load i32, i32 *%ptr6
+  %val7 = load i32, i32 *%ptr7
+  %val8 = load i32, i32 *%ptr8
+  %val9 = load i32, i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = zext i32 %frob0 to i64
+  %ext1 = zext i32 %frob1 to i64
+  %ext2 = zext i32 %frob2 to i64
+  %ext3 = zext i32 %frob3 to i64
+  %ext4 = zext i32 %frob4 to i64
+  %ext5 = zext i32 %frob5 to i64
+  %ext6 = zext i32 %frob6 to i64
+  %ext7 = zext i32 %frob7 to i64
+  %ext8 = zext i32 %frob8 to i64
+  %ext9 = zext i32 %frob9 to i64
+
+  %t0 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %ret, i64 %ext0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add0, i64 %ext1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add1, i64 %ext2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add2, i64 %ext3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add3, i64 %ext4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add4, i64 %ext5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add5, i64 %ext6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add6, i64 %ext7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add7, i64 %ext8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add8, i64 %ext9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-04.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-04.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-04.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-04.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,95 @@
+; Test 32-bit addition in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check addition of 1.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: alfi %r3, 1
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the ALFI range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: alfi %r3, 4294967295
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 4294967295)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that negative values are treated as unsigned
+define zeroext i1 @f3(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: alfi %r3, 4294967295
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 -1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f4(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: alfi %r3, 1
+; CHECK: st %r3, 0(%r4)
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f5(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: alfi %r3, 1
+; CHECK: st %r3, 0(%r4)
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-05.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-05.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-05.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-05.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,112 @@
+; Test 64-bit addition in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check addition of 1.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: algfi %r3, 1
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the ALGFI range.
+define zeroext i1 @f2(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: algfi %r3, 4294967295
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 4294967295)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must be loaded into a register first.
+define zeroext i1 @f3(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: llihl [[REG1:%r[0-9]+]], 1
+; CHECK: algr [[REG1]], %r3
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 4294967296)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Likewise for negative values.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: lghi [[REG1:%r[0-9]+]], -1
+; CHECK: algr [[REG1]], %r3
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 -1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f5(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: algfi %r3, 1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f6(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: algfi %r3, 1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-06.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-06.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-06.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-06.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,80 @@
+; Test the three-operand form of 32-bit addition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo(i32, i32, i32)
+
+; Check ALRK.
+define i32 @f1(i32 %dummy, i32 %a, i32 %b, i32 *%flag) {
+; CHECK-LABEL: f1:
+; CHECK: alrk %r2, %r3, %r4
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: risblg [[REG2:%r[0-5]]], [[REG1]], 31, 159, 35
+; CHECK: st [[REG2]], 0(%r5)
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  %ext = zext i1 %obit to i32
+  store i32 %ext, i32 *%flag
+  ret i32 %val
+}
+
+; Check using the overflow result for a branch.
+define i32 @f2(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f2:
+; CHECK: alrk %r2, %r3, %r4
+; CHECK-NEXT: bler %r14
+; CHECK: lhi %r2, 0
+; CHECK: jg foo@PLT
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %call, label %exit
+
+call:
+  %res = tail call i32 @foo(i32 0, i32 %a, i32 %b)
+  ret i32 %res
+
+exit:
+  ret i32 %val
+}
+
+; ... and the same with the inverted direction.
+define i32 @f3(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: alrk %r2, %r3, %r4
+; CHECK-NEXT: bnler %r14
+; CHECK: lhi %r2, 0
+; CHECK: jg foo@PLT
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %exit, label %call
+
+call:
+  %res = tail call i32 @foo(i32 0, i32 %a, i32 %b)
+  ret i32 %res
+
+exit:
+  ret i32 %val
+}
+
+; Check that we can still use ALR in obvious cases.
+define i32 @f4(i32 %a, i32 %b, i32 *%flag) {
+; CHECK-LABEL: f4:
+; CHECK: alr %r2, %r3
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: risblg [[REG2:%r[0-5]]], [[REG1]], 31, 159, 35
+; CHECK: st [[REG2]], 0(%r4)
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  %ext = zext i1 %obit to i32
+  store i32 %ext, i32 *%flag
+  ret i32 %val
+}
+
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-07.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-07.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-07.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-07.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,80 @@
+; Test the three-operand form of 64-bit addition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i64 @foo(i64, i64, i64)
+
+; Check ALGRK.
+define i64 @f1(i64 %dummy, i64 %a, i64 %b, i64 *%flag) {
+; CHECK-LABEL: f1:
+; CHECK: algrk %r2, %r3, %r4
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: risbg [[REG2:%r[0-5]]], [[REG1]], 63, 191, 35
+; CHECK: stg [[REG2]], 0(%r5)
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  %ext = zext i1 %obit to i64
+  store i64 %ext, i64 *%flag
+  ret i64 %val
+}
+
+; Check using the overflow result for a branch.
+define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f2:
+; CHECK: algrk %r2, %r3, %r4
+; CHECK-NEXT: bler %r14
+; CHECK: lghi %r2, 0
+; CHECK: jg foo@PLT
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %call, label %exit
+
+call:
+  %res = tail call i64 @foo(i64 0, i64 %a, i64 %b)
+  ret i64 %res
+
+exit:
+  ret i64 %val
+}
+
+; ... and the same with the inverted direction.
+define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK: algrk %r2, %r3, %r4
+; CHECK-NEXT: bnler %r14
+; CHECK: lghi %r2, 0
+; CHECK: jg foo@PLT
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %exit, label %call
+
+call:
+  %res = tail call i64 @foo(i64 0, i64 %a, i64 %b)
+  ret i64 %res
+
+exit:
+  ret i64 %val
+}
+
+; Check that we can still use ALGR in obvious cases.
+define i64 @f4(i64 %a, i64 %b, i64 *%flag) {
+; CHECK-LABEL: f4:
+; CHECK: algr %r2, %r3
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: risbg [[REG2:%r[0-5]]], [[REG1]], 63, 191, 35
+; CHECK: stg [[REG2]], 0(%r4)
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  %ext = zext i1 %obit to i64
+  store i64 %ext, i64 *%flag
+  ret i64 %val
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-08.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-08.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-08.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-08.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,142 @@
+; Test 32-bit addition in which the second operand is constant and in which
+; three-operand forms are available.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo()
+
+; Check addition of 1.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the ALHSIK range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, 32767
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 32767)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use ALFI instead.
+define zeroext i1 @f3(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: alfi %r3, 32768
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative ALHSIK range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 -1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the ALHSIK range.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, -32768
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 -32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use ALFI instead.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: alfi %r3, 4294934527
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 -32769)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f7(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK: bler %r14
+; CHECK: jg foo@PLT
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f8(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK: bnler %r14
+; CHECK: jg foo@PLT
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-09.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-09.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-09.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-09.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,140 @@
+; Test 64-bit addition in which the second operand is constant and in which
+; three-operand forms are available.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i64 @foo()
+
+; Check additions of 1.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the ALGHSIK range.
+define zeroext i1 @f2(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, 32767
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 32767)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use ALGFI instead.
+define zeroext i1 @f3(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: algfi %r3, 32768
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative ALGHSIK range.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 -1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the ALGHSIK range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, -32768
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 35
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 -32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Test the next value down, which cannot use either ALGHSIK or ALGFI.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK-NOT: alghsik
+; CHECK-NOT: algfi
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 -32769)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f7(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK: bler %r14
+; CHECK: jg foo@PLT
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f8(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK: bnler %r14
+; CHECK: jg foo@PLT
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-10.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-10.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-10.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-10.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,480 @@
+; Test 32-bit additions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check addition of 1.
+define zeroext i1 @f1(i32 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: alsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i32 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: alsi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 127)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use an addition and a store.
+define zeroext i1 @f3(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: alfi [[VAL]], 128
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i32 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: alsi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 -128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: alfi [[VAL]], 4294967167
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 -129)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ALSI range.
+define zeroext i1 @f6(i32 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: alsi 524284(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131071
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i32 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: alsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the ALSI range.
+define zeroext i1 @f8(i32 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: alsi -524288(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i32 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524292
+; CHECK: alsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131073
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that ALSI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: alsi 4(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 4
+  %ptr = inttoptr i64 %add2 to i32 *
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that adding 127 to a spilled value can use ALSI.
+define zeroext i1 @f11(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: alsi {{[0-9]+}}(%r15), 127
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val0, i32 127)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val1, i32 127)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val2, i32 127)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val3, i32 127)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val4, i32 127)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val5, i32 127)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val6, i32 127)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val7, i32 127)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val8, i32 127)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val9, i32 127)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val10, i32 127)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val11, i32 127)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val12, i32 127)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val13, i32 127)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val14, i32 127)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val15, i32 127)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check that adding -128 to a spilled value can use ALSI.
+define zeroext i1 @f12(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f12:
+; CHECK: alsi {{[0-9]+}}(%r15), -128
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val0, i32 -128)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val1, i32 -128)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val2, i32 -128)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val3, i32 -128)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val4, i32 -128)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val5, i32 -128)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val6, i32 -128)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val7, i32 -128)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val8, i32 -128)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val9, i32 -128)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val10, i32 -128)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val11, i32 -128)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val12, i32 -128)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val13, i32 -128)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val14, i32 -128)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val15, i32 -128)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f13(i32 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: alsi 0(%r2), 1
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f14(i32 *%ptr) {
+; CHECK-LABEL: f14:
+; CHECK: alsi 0(%r2), 1
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-uadd-11.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-uadd-11.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-uadd-11.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-uadd-11.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,349 @@
+; Test 64-bit additions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check addition of 1.
+define zeroext i1 @f1(i64 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: algsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i64 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: algsi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 127)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use an addition and a store.
+define zeroext i1 @f3(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: lg [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: algfi [[VAL]], 128
+; CHECK-DAG: stg [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i64 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: algsi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 -128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: lg [[VAL1:%r[0-5]]], 0(%r3)
+; CHECK: lghi [[VAL2:%r[0-9]+]], -129
+; CHECK: algr [[VAL2]], [[VAL1]]
+; CHECK-DAG: stg [[VAL2]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 -129)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ALGSI range.
+define zeroext i1 @f6(i64 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: algsi 524280(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65535
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i64 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: algsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the ALGSI range.
+define zeroext i1 @f8(i64 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: algsi -524288(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524296
+; CHECK: algsi 0(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65537
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that ALGSI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: algsi 8(%r2), 1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: risbg %r2, [[REG]], 63, 191, 35
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 8
+  %ptr = inttoptr i64 %add2 to i64 *
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that adding 127 to a spilled value can use ALGSI.
+define zeroext i1 @f11(i64 *%ptr, i64 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: algsi {{[0-9]+}}(%r15), 127
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i64, i64 *%ptr
+  %val1 = load volatile i64, i64 *%ptr
+  %val2 = load volatile i64, i64 *%ptr
+  %val3 = load volatile i64, i64 *%ptr
+  %val4 = load volatile i64, i64 *%ptr
+  %val5 = load volatile i64, i64 *%ptr
+  %val6 = load volatile i64, i64 *%ptr
+  %val7 = load volatile i64, i64 *%ptr
+  %val8 = load volatile i64, i64 *%ptr
+  %val9 = load volatile i64, i64 *%ptr
+  %val10 = load volatile i64, i64 *%ptr
+  %val11 = load volatile i64, i64 *%ptr
+  %val12 = load volatile i64, i64 *%ptr
+  %val13 = load volatile i64, i64 *%ptr
+  %val14 = load volatile i64, i64 *%ptr
+  %val15 = load volatile i64, i64 *%ptr
+
+  %test = icmp ne i64 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 127)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val1, i64 127)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val2, i64 127)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val3, i64 127)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val4, i64 127)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val5, i64 127)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val6, i64 127)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val7, i64 127)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val8, i64 127)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val9, i64 127)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val10, i64 127)
+  %add10 = extractvalue {i64, i1} %t10, 0
+  %obit10 = extractvalue {i64, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val11, i64 127)
+  %add11 = extractvalue {i64, i1} %t11, 0
+  %obit11 = extractvalue {i64, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val12, i64 127)
+  %add12 = extractvalue {i64, i1} %t12, 0
+  %obit12 = extractvalue {i64, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val13, i64 127)
+  %add13 = extractvalue {i64, i1} %t13, 0
+  %obit13 = extractvalue {i64, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val14, i64 127)
+  %add14 = extractvalue {i64, i1} %t14, 0
+  %obit14 = extractvalue {i64, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val15, i64 127)
+  %add15 = extractvalue {i64, i1} %t15, 0
+  %obit15 = extractvalue {i64, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i64 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i64 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i64 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i64 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i64 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i64 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i64 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i64 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i64 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i64 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i64 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i64 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i64 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i64 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i64 %new0, i64 *%ptr
+  store volatile i64 %new1, i64 *%ptr
+  store volatile i64 %new2, i64 *%ptr
+  store volatile i64 %new3, i64 *%ptr
+  store volatile i64 %new4, i64 *%ptr
+  store volatile i64 %new5, i64 *%ptr
+  store volatile i64 %new6, i64 *%ptr
+  store volatile i64 %new7, i64 *%ptr
+  store volatile i64 %new8, i64 *%ptr
+  store volatile i64 %new9, i64 *%ptr
+  store volatile i64 %new10, i64 *%ptr
+  store volatile i64 %new11, i64 *%ptr
+  store volatile i64 %new12, i64 *%ptr
+  store volatile i64 %new13, i64 *%ptr
+  store volatile i64 %new14, i64 *%ptr
+  store volatile i64 %new15, i64 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f12(i64 *%ptr) {
+; CHECK-LABEL: f12:
+; CHECK: algsi 0(%r2), 1
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f13(i64 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: algsi 0(%r2), 1
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-01.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-01.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-01.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-01.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,325 @@
+; Test 32-bit subtraction in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check SLR.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: slr %r3, %r4
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: slr %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i32 %dummy, i32 %a, i32 %b, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: slr %r3, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check the low end of the SL range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: sl %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SL range.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: sl %r3, 4092(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1023
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which should use SLY instead of SL.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: sly %r3, 4096(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 1024
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SLY range.
+define zeroext i1 @f7(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: sly %r3, 524284(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f8(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: sl %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SLY range.
+define zeroext i1 @f9(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: sly %r3, -4(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SLY range.
+define zeroext i1 @f10(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: sly %r3, -524288(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f11(i32 %dummy, i32 %a, i32 *%src, i32 *%res) {
+; CHECK-LABEL: f11:
+; CHECK: agfi %r4, -524292
+; CHECK: sl %r3, 0(%r4)
+; CHECK-DAG: st %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that SL allows an index.
+define zeroext i1 @f12(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f12:
+; CHECK: sl %r4, 4092({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4092
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that SLY allows an index.
+define zeroext i1 @f13(i64 %src, i64 %index, i32 %a, i32 *%res) {
+; CHECK-LABEL: f13:
+; CHECK: sly %r4, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: st %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 4096
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that subtractions of spilled values can use SL rather than SLR.
+define zeroext i1 @f14(i32 *%ptr0) {
+; CHECK-LABEL: f14:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: sl %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32, i32 *%ptr0
+  %val1 = load i32, i32 *%ptr1
+  %val2 = load i32, i32 *%ptr2
+  %val3 = load i32, i32 *%ptr3
+  %val4 = load i32, i32 *%ptr4
+  %val5 = load i32, i32 *%ptr5
+  %val6 = load i32, i32 *%ptr6
+  %val7 = load i32, i32 *%ptr7
+  %val8 = load i32, i32 *%ptr8
+  %val9 = load i32, i32 *%ptr9
+
+  %ret = call i32 @foo()
+
+  %t0 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %ret, i32 %val0)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add0, i32 %val1)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add1, i32 %val2)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add2, i32 %val3)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add3, i32 %val4)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add4, i32 %val5)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add5, i32 %val6)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add6, i32 %val7)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add7, i32 %val8)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %add8, i32 %val9)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-02.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-02.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-02.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-02.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,269 @@
+; Test 64-bit subtraction in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check SLGR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: slgr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: slgr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i64 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: slgr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check SLG with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: slg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i64, i64 *%src
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SLG range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: slg %r3, 524280(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65535
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: slg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SLG range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: slg %r3, -8(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -1
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SLG range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: slg %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65536
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i64 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524296
+; CHECK: slg %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%src, i64 -65537
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that SLG allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: slg %r4, 524280({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524280
+  %ptr = inttoptr i64 %add2 to i64 *
+  %b = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that subtractions of spilled values can use SLG rather than SLGR.
+define zeroext i1 @f11(i64 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: slg %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
+
+  %val0 = load i64, i64 *%ptr0
+  %val1 = load i64, i64 *%ptr1
+  %val2 = load i64, i64 *%ptr2
+  %val3 = load i64, i64 *%ptr3
+  %val4 = load i64, i64 *%ptr4
+  %val5 = load i64, i64 *%ptr5
+  %val6 = load i64, i64 *%ptr6
+  %val7 = load i64, i64 *%ptr7
+  %val8 = load i64, i64 *%ptr8
+  %val9 = load i64, i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %ret, i64 %val0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add0, i64 %val1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add1, i64 %val2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add2, i64 %val3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add3, i64 %val4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add4, i64 %val5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add5, i64 %val6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add6, i64 %val7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add7, i64 %val8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add8, i64 %val9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-03.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-03.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-03.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-03.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,312 @@
+; Test subtraction of a zero-extended i32 from an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check SLGFR.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: slgfr %r3, %r4
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f2(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: slgfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgle foo at PLT
+; CHECK: br %r14
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f3(i64 %dummy, i64 %a, i32 %b, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: slgfr %r3, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: jgnle foo at PLT
+; CHECK: br %r14
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Check SLGF with no displacement.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: slgf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %b = load i32, i32 *%src
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the aligned SLGF range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: slgf %r3, 524284(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131071
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: agfi %r4, 524288
+; CHECK: slgf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 131072
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative aligned SLGF range.
+define zeroext i1 @f7(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: slgf %r3, -4(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -1
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the SLGF range.
+define zeroext i1 @f8(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: slgf %r3, -524288(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131072
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 %dummy, i64 %a, i32 *%src, i64 *%res) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r4, -524292
+; CHECK: slgf %r3, 0(%r4)
+; CHECK-DAG: stg %r3, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%src, i64 -131073
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that SLGF allows an index.
+define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, i64 *%res) {
+; CHECK-LABEL: f10:
+; CHECK: slgf %r4, 524284({{%r3,%r2|%r2,%r3}})
+; CHECK-DAG: stg %r4, 0(%r5)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524284
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32, i32 *%ptr
+  %bext = zext i32 %b to i64
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check that subtractions of spilled values can use SLGF rather than SLGFR.
+define zeroext i1 @f11(i32 *%ptr0) {
+; CHECK-LABEL: f11:
+; CHECK: brasl %r14, foo at PLT
+; CHECK: slgf %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
+
+  %val0 = load i32, i32 *%ptr0
+  %val1 = load i32, i32 *%ptr1
+  %val2 = load i32, i32 *%ptr2
+  %val3 = load i32, i32 *%ptr3
+  %val4 = load i32, i32 *%ptr4
+  %val5 = load i32, i32 *%ptr5
+  %val6 = load i32, i32 *%ptr6
+  %val7 = load i32, i32 *%ptr7
+  %val8 = load i32, i32 *%ptr8
+  %val9 = load i32, i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = zext i32 %frob0 to i64
+  %ext1 = zext i32 %frob1 to i64
+  %ext2 = zext i32 %frob2 to i64
+  %ext3 = zext i32 %frob3 to i64
+  %ext4 = zext i32 %frob4 to i64
+  %ext5 = zext i32 %frob5 to i64
+  %ext6 = zext i32 %frob6 to i64
+  %ext7 = zext i32 %frob7 to i64
+  %ext8 = zext i32 %frob8 to i64
+  %ext9 = zext i32 %frob9 to i64
+
+  %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %ret, i64 %ext0)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add0, i64 %ext1)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add1, i64 %ext2)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add2, i64 %ext3)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add3, i64 %ext4)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add4, i64 %ext5)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add5, i64 %ext6)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add6, i64 %ext7)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add7, i64 %ext8)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add8, i64 %ext9)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+
+  ret i1 %res9
+}
+
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-04.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-04.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-04.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-04.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,98 @@
+; Test 32-bit subtraction in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: slfi %r3, 1
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the SLFI range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: slfi %r3, 4294967295
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 4294967295)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check that negative values are treated as unsigned
+define zeroext i1 @f3(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: slfi %r3, 4294967295
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f4(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: slfi %r3, 1
+; CHECK: st %r3, 0(%r4)
+; CHECK: jgle foo at PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f5(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: slfi %r3, 1
+; CHECK: st %r3, 0(%r4)
+; CHECK: jgnle foo at PLT
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-05.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-05.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-05.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-05.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,116 @@
+; Test 64-bit subtraction in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: slgfi %r3, 1
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the SLGFI range.
+define zeroext i1 @f2(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: slgfi %r3, 4294967295
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 4294967295)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must be loaded into a register first.
+define zeroext i1 @f3(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: llihl [[REG1:%r[0-9]+]], 1
+; CHECK: slgr %r3, [[REG1]]
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 4294967296)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Likewise for negative values.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: lghi [[REG1:%r[0-9]+]], -1
+; CHECK: slgr %r3, [[REG1]]
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f5(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: slgfi %r3, 1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: jgle foo at PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f6(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: slgfi %r3, 1
+; CHECK: stg %r3, 0(%r4)
+; CHECK: jgnle foo at PLT
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-06.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-06.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-06.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-06.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,82 @@
+; Test the three-operand form of 32-bit subtraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo(i32, i32, i32)
+
+; Check SLRK.
+define i32 @f1(i32 %dummy, i32 %a, i32 %b, i32 *%flag) {
+; CHECK-LABEL: f1:
+; CHECK: slrk %r2, %r3, %r4
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: srl [[REG]], 31
+; CHECK: st [[REG]], 0(%r5)
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  %ext = zext i1 %obit to i32
+  store i32 %ext, i32 *%flag
+  ret i32 %val
+}
+
+; Check using the overflow result for a branch.
+define i32 @f2(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f2:
+; CHECK: slrk %r2, %r3, %r4
+; CHECK-NEXT: bnler %r14
+; CHECK: lhi %r2, 0
+; CHECK: jg foo at PLT
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %call, label %exit
+
+call:
+  %res = tail call i32 @foo(i32 0, i32 %a, i32 %b)
+  ret i32 %res
+
+exit:
+  ret i32 %val
+}
+
+; ... and the same with the inverted direction.
+define i32 @f3(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: slrk %r2, %r3, %r4
+; CHECK-NEXT: bler %r14
+; CHECK: lhi %r2, 0
+; CHECK: jg foo at PLT
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %exit, label %call
+
+call:
+  %res = tail call i32 @foo(i32 0, i32 %a, i32 %b)
+  ret i32 %res
+
+exit:
+  ret i32 %val
+}
+
+; Check that we can still use SLR in obvious cases.
+define i32 @f4(i32 %a, i32 %b, i32 *%flag) {
+; CHECK-LABEL: f4:
+; CHECK: slr %r2, %r3
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: srl [[REG]], 31
+; CHECK: st [[REG]], 0(%r4)
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  %ext = zext i1 %obit to i32
+  store i32 %ext, i32 *%flag
+  ret i32 %val
+}
+
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-07.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-07.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-07.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-07.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,82 @@
+; Test the three-operand form of 64-bit subtraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i64 @foo(i64, i64, i64)
+
+; Check SLGRK.
+define i64 @f1(i64 %dummy, i64 %a, i64 %b, i64 *%flag) {
+; CHECK-LABEL: f1:
+; CHECK: slgrk %r2, %r3, %r4
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: afi [[REG1]], -536870912
+; CHECK: risbg [[REG2:%r[0-5]]], [[REG1]], 63, 191, 33
+; CHECK: stg [[REG2]], 0(%r5)
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  %ext = zext i1 %obit to i64
+  store i64 %ext, i64 *%flag
+  ret i64 %val
+}
+
+; Check using the overflow result for a branch.
+define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f2:
+; CHECK: slgrk %r2, %r3, %r4
+; CHECK-NEXT: bnler %r14
+; CHECK: lghi %r2, 0
+; CHECK: jg foo at PLT
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %call, label %exit
+
+call:
+  %res = tail call i64 @foo(i64 0, i64 %a, i64 %b)
+  ret i64 %res
+
+exit:
+  ret i64 %val
+}
+
+; ... and the same with the inverted direction.
+define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
+; CHECK-LABEL: f3:
+; CHECK: slgrk %r2, %r3, %r4
+; CHECK-NEXT: bler %r14
+; CHECK: lghi %r2, 0
+; CHECK: jg foo at PLT
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %exit, label %call
+
+call:
+  %res = tail call i64 @foo(i64 0, i64 %a, i64 %b)
+  ret i64 %res
+
+exit:
+  ret i64 %val
+}
+
+; Check that we can still use SLGR in obvious cases.
+define i64 @f4(i64 %a, i64 %b, i64 *%flag) {
+; CHECK-LABEL: f4:
+; CHECK: slgr %r2, %r3
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: afi [[REG1]], -536870912
+; CHECK: risbg [[REG2:%r[0-5]]], [[REG1]], 63, 191, 33
+; CHECK: stg [[REG2]], 0(%r4)
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  %ext = zext i1 %obit to i64
+  store i64 %ext, i64 *%flag
+  ret i64 %val
+}
+
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-08.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-08.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-08.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-08.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,148 @@
+; Test 32-bit subtraction in which the second operand is constant and in which
+; three-operand forms are available.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the ALHSIK range.
+define zeroext i1 @f2(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, -32768
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use SLFI instead.
+define zeroext i1 @f3(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: slfi %r3, 32769
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 32769)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative ALHSIK range.
+define zeroext i1 @f4(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the ALHSIK range.
+define zeroext i1 @f5(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, 32767
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -32767)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check the next value down, which must use SLFI instead.
+define zeroext i1 @f6(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f6:
+; CHECK: slfi %r3, 4294934528
+; CHECK-DAG: st %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -32768)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f7(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK: bnler %r14
+; CHECK: jg foo at PLT
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f8(i32 %dummy, i32 %a, i32 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: alhsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: st [[REG1]], 0(%r4)
+; CHECK: bler %r14
+; CHECK: jg foo at PLT
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-09.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-09.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-09.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-09.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,145 @@
+; Test 64-bit subtraction in which the second operand is constant and in which
+; three-operand forms are available.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i64 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f1:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the ALGHSIK range.
+define zeroext i1 @f2(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f2:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, -32768
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the next value up, which must use SLGFI instead.
+define zeroext i1 @f3(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f3:
+; CHECK: slgfi %r3, 32769
+; CHECK-DAG: stg %r3, 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 32769)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the high end of the negative ALGHSIK range.
+define zeroext i1 @f4(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f4:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, 1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check the low end of the ALGHSIK range.
+define zeroext i1 @f5(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f5:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, 32767
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK-DAG: ipm [[REG2:%r[0-5]]]
+; CHECK-DAG: afi [[REG2]], -536870912
+; CHECK-DAG: risbg %r2, [[REG2]], 63, 191, 33
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -32767)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Test the next value down, which cannot use either ALGHSIK or SLGFI.
+define zeroext i1 @f6(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f6:
+; CHECK-NOT: alghsik
+; CHECK-NOT: slgfi
+; CHECK: br %r14
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -32768)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  ret i1 %obit
+}
+
+; Check using the overflow result for a branch.
+define void @f7(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f7:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK: bnler %r14
+; CHECK: jg foo at PLT
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f8(i64 %dummy, i64 %a, i64 *%res) {
+; CHECK-LABEL: f8:
+; CHECK: alghsik [[REG1:%r[0-5]]], %r3, -1
+; CHECK-DAG: stg [[REG1]], 0(%r4)
+; CHECK: bler %r14
+; CHECK: jg foo at PLT
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%res
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-10.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-10.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-10.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-10.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,490 @@
+; Test 32-bit subtractions of constants from memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i32 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: alsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i32 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: alsi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use a subtraction and a store.
+define zeroext i1 @f3(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: slfi [[VAL]], 129
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 129)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i32 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: alsi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -127)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i32 %dummy, i32 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: l [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: slfi [[VAL]], 4294967168
+; CHECK-DAG: st [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 -128)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ASI range.
+define zeroext i1 @f6(i32 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: alsi 524284(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131071
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i32 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: alsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the ALSI range.
+define zeroext i1 @f8(i32 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: alsi -524288(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131072
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i32 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524292
+; CHECK: alsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i32, i32 *%base, i64 -131073
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that ALSI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: alsi 4(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 4
+  %ptr = inttoptr i64 %add2 to i32 *
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  ret i1 %obit
+}
+
+; Check that subtracting 128 from a spilled value can use ALSI.
+define zeroext i1 @f11(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: alsi {{[0-9]+}}(%r15), -128
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val0, i32 128)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val1, i32 128)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val2, i32 128)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val3, i32 128)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val4, i32 128)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val5, i32 128)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val6, i32 128)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val7, i32 128)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val8, i32 128)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val9, i32 128)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val10, i32 128)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val11, i32 128)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val12, i32 128)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val13, i32 128)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val14, i32 128)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val15, i32 128)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check that subtracting -127 from a spilled value can use ALSI.
+define zeroext i1 @f12(i32 *%ptr, i32 %sel) {
+; CHECK-LABEL: f12:
+; CHECK: alsi {{[0-9]+}}(%r15), 127
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i32, i32 *%ptr
+  %val1 = load volatile i32, i32 *%ptr
+  %val2 = load volatile i32, i32 *%ptr
+  %val3 = load volatile i32, i32 *%ptr
+  %val4 = load volatile i32, i32 *%ptr
+  %val5 = load volatile i32, i32 *%ptr
+  %val6 = load volatile i32, i32 *%ptr
+  %val7 = load volatile i32, i32 *%ptr
+  %val8 = load volatile i32, i32 *%ptr
+  %val9 = load volatile i32, i32 *%ptr
+  %val10 = load volatile i32, i32 *%ptr
+  %val11 = load volatile i32, i32 *%ptr
+  %val12 = load volatile i32, i32 *%ptr
+  %val13 = load volatile i32, i32 *%ptr
+  %val14 = load volatile i32, i32 *%ptr
+  %val15 = load volatile i32, i32 *%ptr
+
+  %test = icmp ne i32 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val0, i32 -127)
+  %add0 = extractvalue {i32, i1} %t0, 0
+  %obit0 = extractvalue {i32, i1} %t0, 1
+  %t1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val1, i32 -127)
+  %add1 = extractvalue {i32, i1} %t1, 0
+  %obit1 = extractvalue {i32, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val2, i32 -127)
+  %add2 = extractvalue {i32, i1} %t2, 0
+  %obit2 = extractvalue {i32, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val3, i32 -127)
+  %add3 = extractvalue {i32, i1} %t3, 0
+  %obit3 = extractvalue {i32, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val4, i32 -127)
+  %add4 = extractvalue {i32, i1} %t4, 0
+  %obit4 = extractvalue {i32, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val5, i32 -127)
+  %add5 = extractvalue {i32, i1} %t5, 0
+  %obit5 = extractvalue {i32, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val6, i32 -127)
+  %add6 = extractvalue {i32, i1} %t6, 0
+  %obit6 = extractvalue {i32, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val7, i32 -127)
+  %add7 = extractvalue {i32, i1} %t7, 0
+  %obit7 = extractvalue {i32, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val8, i32 -127)
+  %add8 = extractvalue {i32, i1} %t8, 0
+  %obit8 = extractvalue {i32, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val9, i32 -127)
+  %add9 = extractvalue {i32, i1} %t9, 0
+  %obit9 = extractvalue {i32, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val10, i32 -127)
+  %add10 = extractvalue {i32, i1} %t10, 0
+  %obit10 = extractvalue {i32, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val11, i32 -127)
+  %add11 = extractvalue {i32, i1} %t11, 0
+  %obit11 = extractvalue {i32, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val12, i32 -127)
+  %add12 = extractvalue {i32, i1} %t12, 0
+  %obit12 = extractvalue {i32, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val13, i32 -127)
+  %add13 = extractvalue {i32, i1} %t13, 0
+  %obit13 = extractvalue {i32, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val14, i32 -127)
+  %add14 = extractvalue {i32, i1} %t14, 0
+  %obit14 = extractvalue {i32, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %val15, i32 -127)
+  %add15 = extractvalue {i32, i1} %t15, 0
+  %obit15 = extractvalue {i32, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i32 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i32 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i32 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i32 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i32 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i32 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i32 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i32 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i32 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i32 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i32 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i32 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i32 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i32 %new0, i32 *%ptr
+  store volatile i32 %new1, i32 *%ptr
+  store volatile i32 %new2, i32 *%ptr
+  store volatile i32 %new3, i32 *%ptr
+  store volatile i32 %new4, i32 *%ptr
+  store volatile i32 %new5, i32 *%ptr
+  store volatile i32 %new6, i32 *%ptr
+  store volatile i32 %new7, i32 *%ptr
+  store volatile i32 %new8, i32 *%ptr
+  store volatile i32 %new9, i32 *%ptr
+  store volatile i32 %new10, i32 *%ptr
+  store volatile i32 %new11, i32 *%ptr
+  store volatile i32 %new12, i32 *%ptr
+  store volatile i32 %new13, i32 *%ptr
+  store volatile i32 %new14, i32 *%ptr
+  store volatile i32 %new15, i32 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f13(i32 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: alsi 0(%r2), -1
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f14(i32 *%ptr) {
+; CHECK-LABEL: f14:
+; CHECK: alsi 0(%r2), -1
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %a = load i32, i32 *%ptr
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 1)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i32 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+

Added: llvm/trunk/test/CodeGen/SystemZ/int-usub-11.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/int-usub-11.ll?rev=331203&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/int-usub-11.ll (added)
+++ llvm/trunk/test/CodeGen/SystemZ/int-usub-11.ll Mon Apr 30 10:54:28 2018
@@ -0,0 +1,359 @@
+; Test 64-bit subtractions of constants from memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @foo()
+
+; Check subtraction of 1.
+define zeroext i1 @f1(i64 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: algsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the constant range.
+define zeroext i1 @f2(i64 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: algsi 0(%r2), -128
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next constant up, which must use a subtraction and a store.
+define zeroext i1 @f3(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: lg [[VAL:%r[0-5]]], 0(%r3)
+; CHECK: slgfi [[VAL]], 129
+; CHECK-DAG: stg [[VAL]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 129)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the constant range.
+define zeroext i1 @f4(i64 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: algsi 0(%r2), 127
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -127)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next value down, with the same comment as f3.
+define zeroext i1 @f5(i64 %dummy, i64 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: lg [[VAL1:%r[0-5]]], 0(%r3)
+; CHECK: lghi [[VAL2:%r[0-9]+]], -128
+; CHECK: slgr [[VAL1]], [[VAL2]]
+; CHECK-DAG: stg [[VAL1]], 0(%r3)
+; CHECK-DAG: ipm [[REG:%r[0-5]]]
+; CHECK-DAG: afi [[REG]], -536870912
+; CHECK-DAG: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 -128)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the high end of the aligned ALGSI range.
+define zeroext i1 @f6(i64 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: algsi 524280(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65535
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f7(i64 *%base) {
+; CHECK-LABEL: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: algsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the low end of the ALGSI range.
+define zeroext i1 @f8(i64 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: algsi -524288(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65536
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define zeroext i1 @f9(i64 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: agfi %r2, -524296
+; CHECK: algsi 0(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %ptr = getelementptr i64, i64 *%base, i64 -65537
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that ALGSI does not allow indices.
+define zeroext i1 @f10(i64 %base, i64 %index) {
+; CHECK-LABEL: f10:
+; CHECK: agr %r2, %r3
+; CHECK: algsi 8(%r2), -1
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: afi [[REG]], -536870912
+; CHECK: risbg %r2, [[REG]], 63, 191, 33
+; CHECK: br %r14
+  %add1 = add i64 %base, %index
+  %add2 = add i64 %add1, 8
+  %ptr = inttoptr i64 %add2 to i64 *
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  ret i1 %obit
+}
+
+; Check that subtracting 128 from a spilled value can use ALGSI.
+define zeroext i1 @f11(i64 *%ptr, i64 %sel) {
+; CHECK-LABEL: f11:
+; CHECK: algsi {{[0-9]+}}(%r15), -128
+; CHECK: br %r14
+entry:
+  %val0 = load volatile i64, i64 *%ptr
+  %val1 = load volatile i64, i64 *%ptr
+  %val2 = load volatile i64, i64 *%ptr
+  %val3 = load volatile i64, i64 *%ptr
+  %val4 = load volatile i64, i64 *%ptr
+  %val5 = load volatile i64, i64 *%ptr
+  %val6 = load volatile i64, i64 *%ptr
+  %val7 = load volatile i64, i64 *%ptr
+  %val8 = load volatile i64, i64 *%ptr
+  %val9 = load volatile i64, i64 *%ptr
+  %val10 = load volatile i64, i64 *%ptr
+  %val11 = load volatile i64, i64 *%ptr
+  %val12 = load volatile i64, i64 *%ptr
+  %val13 = load volatile i64, i64 *%ptr
+  %val14 = load volatile i64, i64 *%ptr
+  %val15 = load volatile i64, i64 *%ptr
+
+  %test = icmp ne i64 %sel, 0
+  br i1 %test, label %add, label %store
+
+add:
+  %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val0, i64 128)
+  %add0 = extractvalue {i64, i1} %t0, 0
+  %obit0 = extractvalue {i64, i1} %t0, 1
+  %t1 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val1, i64 128)
+  %add1 = extractvalue {i64, i1} %t1, 0
+  %obit1 = extractvalue {i64, i1} %t1, 1
+  %res1 = or i1 %obit0, %obit1
+  %t2 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val2, i64 128)
+  %add2 = extractvalue {i64, i1} %t2, 0
+  %obit2 = extractvalue {i64, i1} %t2, 1
+  %res2 = or i1 %res1, %obit2
+  %t3 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val3, i64 128)
+  %add3 = extractvalue {i64, i1} %t3, 0
+  %obit3 = extractvalue {i64, i1} %t3, 1
+  %res3 = or i1 %res2, %obit3
+  %t4 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val4, i64 128)
+  %add4 = extractvalue {i64, i1} %t4, 0
+  %obit4 = extractvalue {i64, i1} %t4, 1
+  %res4 = or i1 %res3, %obit4
+  %t5 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val5, i64 128)
+  %add5 = extractvalue {i64, i1} %t5, 0
+  %obit5 = extractvalue {i64, i1} %t5, 1
+  %res5 = or i1 %res4, %obit5
+  %t6 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val6, i64 128)
+  %add6 = extractvalue {i64, i1} %t6, 0
+  %obit6 = extractvalue {i64, i1} %t6, 1
+  %res6 = or i1 %res5, %obit6
+  %t7 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val7, i64 128)
+  %add7 = extractvalue {i64, i1} %t7, 0
+  %obit7 = extractvalue {i64, i1} %t7, 1
+  %res7 = or i1 %res6, %obit7
+  %t8 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val8, i64 128)
+  %add8 = extractvalue {i64, i1} %t8, 0
+  %obit8 = extractvalue {i64, i1} %t8, 1
+  %res8 = or i1 %res7, %obit8
+  %t9 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val9, i64 128)
+  %add9 = extractvalue {i64, i1} %t9, 0
+  %obit9 = extractvalue {i64, i1} %t9, 1
+  %res9 = or i1 %res8, %obit9
+  %t10 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val10, i64 128)
+  %add10 = extractvalue {i64, i1} %t10, 0
+  %obit10 = extractvalue {i64, i1} %t10, 1
+  %res10 = or i1 %res9, %obit10
+  %t11 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val11, i64 128)
+  %add11 = extractvalue {i64, i1} %t11, 0
+  %obit11 = extractvalue {i64, i1} %t11, 1
+  %res11 = or i1 %res10, %obit11
+  %t12 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val12, i64 128)
+  %add12 = extractvalue {i64, i1} %t12, 0
+  %obit12 = extractvalue {i64, i1} %t12, 1
+  %res12 = or i1 %res11, %obit12
+  %t13 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val13, i64 128)
+  %add13 = extractvalue {i64, i1} %t13, 0
+  %obit13 = extractvalue {i64, i1} %t13, 1
+  %res13 = or i1 %res12, %obit13
+  %t14 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val14, i64 128)
+  %add14 = extractvalue {i64, i1} %t14, 0
+  %obit14 = extractvalue {i64, i1} %t14, 1
+  %res14 = or i1 %res13, %obit14
+  %t15 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val15, i64 128)
+  %add15 = extractvalue {i64, i1} %t15, 0
+  %obit15 = extractvalue {i64, i1} %t15, 1
+  %res15 = or i1 %res14, %obit15
+
+  br label %store
+
+store:
+  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
+  %new1 = phi i64 [ %val1, %entry ], [ %add1, %add ]
+  %new2 = phi i64 [ %val2, %entry ], [ %add2, %add ]
+  %new3 = phi i64 [ %val3, %entry ], [ %add3, %add ]
+  %new4 = phi i64 [ %val4, %entry ], [ %add4, %add ]
+  %new5 = phi i64 [ %val5, %entry ], [ %add5, %add ]
+  %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
+  %new7 = phi i64 [ %val7, %entry ], [ %add7, %add ]
+  %new8 = phi i64 [ %val8, %entry ], [ %add8, %add ]
+  %new9 = phi i64 [ %val9, %entry ], [ %add9, %add ]
+  %new10 = phi i64 [ %val10, %entry ], [ %add10, %add ]
+  %new11 = phi i64 [ %val11, %entry ], [ %add11, %add ]
+  %new12 = phi i64 [ %val12, %entry ], [ %add12, %add ]
+  %new13 = phi i64 [ %val13, %entry ], [ %add13, %add ]
+  %new14 = phi i64 [ %val14, %entry ], [ %add14, %add ]
+  %new15 = phi i64 [ %val15, %entry ], [ %add15, %add ]
+  %res = phi i1 [ 0, %entry ], [ %res15, %add ]
+
+  store volatile i64 %new0, i64 *%ptr
+  store volatile i64 %new1, i64 *%ptr
+  store volatile i64 %new2, i64 *%ptr
+  store volatile i64 %new3, i64 *%ptr
+  store volatile i64 %new4, i64 *%ptr
+  store volatile i64 %new5, i64 *%ptr
+  store volatile i64 %new6, i64 *%ptr
+  store volatile i64 %new7, i64 *%ptr
+  store volatile i64 %new8, i64 *%ptr
+  store volatile i64 %new9, i64 *%ptr
+  store volatile i64 %new10, i64 *%ptr
+  store volatile i64 %new11, i64 *%ptr
+  store volatile i64 %new12, i64 *%ptr
+  store volatile i64 %new13, i64 *%ptr
+  store volatile i64 %new14, i64 *%ptr
+  store volatile i64 %new15, i64 *%ptr
+
+  ret i1 %res
+}
+
+; Check using the overflow result for a branch.
+define void @f12(i64 *%ptr) {
+; CHECK-LABEL: f12:
+; CHECK: algsi 0(%r2), -1
+; CHECK: jgle foo@PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %call, label %exit
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+; ... and the same with the inverted direction.
+define void @f13(i64 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: algsi 0(%r2), -1
+; CHECK: jgnle foo@PLT
+; CHECK: br %r14
+  %a = load i64, i64 *%ptr
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 1)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64 *%ptr
+  br i1 %obit, label %exit, label %call
+
+call:
+  tail call i64 @foo()
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+




More information about the llvm-commits mailing list