[llvm] 586d5f9 - [SPARC] Improve integer branch handling for v9 targets

Brad Smith via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 16 17:51:30 PST 2022


Author: Koakuma
Date: 2022-11-16T20:51:20-05:00
New Revision: 586d5f91e6761747382b4f51df4acd340cd187e5

URL: https://github.com/llvm/llvm-project/commit/586d5f91e6761747382b4f51df4acd340cd187e5
DIFF: https://github.com/llvm/llvm-project/commit/586d5f91e6761747382b4f51df4acd340cd187e5.diff

LOG: [SPARC] Improve integer branch handling for v9 targets

Do not emit deprecated v8-style branches when targeting a v9 processor.

As a side effect, this also fixes the emission of useless ba instructions when
lowering conditional branches on 64-bit integer values.
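
For illustration, a minimal sketch of the effect, adapted from the
branches-v9.ll test added below (compiled with something like
"llc -mtriple=sparcv9"; exact labels and scheduling are illustrative):

declare void @f1(...)
declare void @f2(...)
declare void @f3(...)

define void @l(i64 %sel) {
entry:
  %cond = icmp eq i64 %sel, 0
  br i1 %cond, label %tbb, label %fbb

fbb:
  call void @f2()
  br label %end

tbb:
  call void @f1()
  br label %end

end:
  call void @f3()
  ret void
}

With this change, the 64-bit compare is branched on with the v9
branch-with-prediction forms, which name the condition-code register
explicitly ("be %xcc, .LBB1_2" for the conditional branch and
"ba %icc, .LBB1_3" for the unconditional one), instead of the deprecated
v8-style "be"/"ba" without an %icc/%xcc operand.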

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D130006

Added: 
    llvm/test/CodeGen/SPARC/branches-v9.ll

Modified: 
    llvm/lib/Target/Sparc/SparcISelLowering.cpp
    llvm/lib/Target/Sparc/SparcISelLowering.h
    llvm/lib/Target/Sparc/SparcInstr64Bit.td
    llvm/lib/Target/Sparc/SparcInstrInfo.cpp
    llvm/lib/Target/Sparc/SparcInstrInfo.td
    llvm/test/CodeGen/SPARC/atomics.ll
    llvm/test/CodeGen/SPARC/hard-quad-float.ll
    llvm/test/CodeGen/SPARC/missinglabel.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 4d8b3f65d7a29..68cd43d66f474 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1928,7 +1928,10 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
   case SPISD::CMPICC:          return "SPISD::CMPICC";
   case SPISD::CMPFCC:          return "SPISD::CMPFCC";
   case SPISD::BRICC:           return "SPISD::BRICC";
-  case SPISD::BRXCC:           return "SPISD::BRXCC";
+  case SPISD::BPICC:
+    return "SPISD::BPICC";
+  case SPISD::BPXCC:
+    return "SPISD::BPXCC";
   case SPISD::BRFCC:           return "SPISD::BRFCC";
   case SPISD::SELECT_ICC:      return "SPISD::SELECT_ICC";
   case SPISD::SELECT_XCC:      return "SPISD::SELECT_XCC";
@@ -2533,8 +2536,8 @@ static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
 }
 
 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
-                          const SparcTargetLowering &TLI,
-                          bool hasHardQuad) {
+                          const SparcTargetLowering &TLI, bool hasHardQuad,
+                          bool isV9) {
   SDValue Chain = Op.getOperand(0);
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
   SDValue LHS = Op.getOperand(2);
@@ -2552,13 +2555,17 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
   if (LHS.getValueType().isInteger()) {
     CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
     if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
-    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
-    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
+    if (isV9)
+      // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
+      Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
+    else
+      // Non-v9 targets don't have xcc.
+      Opc = SPISD::BRICC;
   } else {
     if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
       if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
       CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
-      Opc = SPISD::BRICC;
+      Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
     } else {
       CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
       if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
@@ -3141,8 +3148,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
                                                        hasHardQuad);
   case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                        hasHardQuad);
-  case ISD::BR_CC:              return LowerBR_CC(Op, DAG, *this,
-                                                  hasHardQuad);
+  case ISD::BR_CC:
+    return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9);
   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG, *this,
                                                       hasHardQuad);
   case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
@@ -3221,6 +3228,8 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case SP::SELECT_CC_FP_ICC:
   case SP::SELECT_CC_DFP_ICC:
   case SP::SELECT_CC_QFP_ICC:
+    if (Subtarget->isV9())
+      return expandSelectCC(MI, BB, SP::BPICC);
     return expandSelectCC(MI, BB, SP::BCOND);
   case SP::SELECT_CC_Int_XCC:
   case SP::SELECT_CC_FP_XCC:

diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 16e4f26870548..3978126391ba8 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -21,37 +21,39 @@ namespace llvm {
   class SparcSubtarget;
 
   namespace SPISD {
-    enum NodeType : unsigned {
-      FIRST_NUMBER = ISD::BUILTIN_OP_END,
-      CMPICC,      // Compare two GPR operands, set icc+xcc.
-      CMPFCC,      // Compare two FP operands, set fcc.
-      BRICC,       // Branch to dest on icc condition
-      BRXCC,       // Branch to dest on xcc condition (64-bit only).
-      BRFCC,       // Branch to dest on fcc condition
-      SELECT_ICC,  // Select between two values using the current ICC flags.
-      SELECT_XCC,  // Select between two values using the current XCC flags.
-      SELECT_FCC,  // Select between two values using the current FCC flags.
-
-      Hi, Lo,      // Hi/Lo operations, typically on a global address.
-
-      FTOI,        // FP to Int within a FP register.
-      ITOF,        // Int to FP within a FP register.
-      FTOX,        // FP to Int64 within a FP register.
-      XTOF,        // Int64 to FP within a FP register.
-
-      CALL,        // A call instruction.
-      RET_FLAG,    // Return with a flag operand.
-      GLOBAL_BASE_REG, // Global base reg for PIC.
-      FLUSHW,      // FLUSH register windows to stack.
-
-      TAIL_CALL,   // Tail call
-
-      TLS_ADD,     // For Thread Local Storage (TLS).
-      TLS_LD,
-      TLS_CALL,
-
-      LOAD_GDOP,   // Load operation w/ gdop relocation.
-    };
+  enum NodeType : unsigned {
+    FIRST_NUMBER = ISD::BUILTIN_OP_END,
+    CMPICC, // Compare two GPR operands, set icc+xcc.
+    CMPFCC, // Compare two FP operands, set fcc.
+    BRICC,  // Branch to dest on icc condition
+    BPICC,  // Branch to dest on icc condition, with prediction (64-bit only).
+    BPXCC,  // Branch to dest on xcc condition, with prediction (64-bit only).
+    BRFCC,  // Branch to dest on fcc condition
+    SELECT_ICC, // Select between two values using the current ICC flags.
+    SELECT_XCC, // Select between two values using the current XCC flags.
+    SELECT_FCC, // Select between two values using the current FCC flags.
+
+    Hi,
+    Lo, // Hi/Lo operations, typically on a global address.
+
+    FTOI, // FP to Int within a FP register.
+    ITOF, // Int to FP within a FP register.
+    FTOX, // FP to Int64 within a FP register.
+    XTOF, // Int64 to FP within a FP register.
+
+    CALL,            // A call instruction.
+    RET_FLAG,        // Return with a flag operand.
+    GLOBAL_BASE_REG, // Global base reg for PIC.
+    FLUSHW,          // FLUSH register windows to stack.
+
+    TAIL_CALL, // Tail call
+
+    TLS_ADD, // For Thread Local Storage (TLS).
+    TLS_LD,
+    TLS_CALL,
+
+    LOAD_GDOP, // Load operation w/ gdop relocation.
+  };
   }
 
   class SparcTargetLowering : public TargetLowering {

diff --git a/llvm/lib/Target/Sparc/SparcInstr64Bit.td b/llvm/lib/Target/Sparc/SparcInstr64Bit.td
index 5c49cc5e36340..27a39602d60f9 100644
--- a/llvm/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/llvm/lib/Target/Sparc/SparcInstr64Bit.td
@@ -310,13 +310,13 @@ def : Pat<(store (i64 0), ADDRri:$dst), (STXri ADDRri:$dst, (i64 G0))>;
 // The icc flags correspond to the 32-bit result, and the xcc are for the
 // full 64-bit result.
 //
-// We reuse CMPICC SDNodes for compares, but use new BRXCC branch nodes for
+// We reuse CMPICC SDNodes for compares, but use new BPXCC branch nodes for
 // 64-bit compares. See LowerBR_CC.
 
 let Predicates = [Is64Bit] in {
 
 let Uses = [ICC], cc = 0b10 in
-  defm BPX : IPredBranch<"%xcc", [(SPbrxcc bb:$imm19, imm:$cond)]>;
+  defm BPX : IPredBranch<"%xcc", [(SPbpxcc bb:$imm19, imm:$cond)]>;
 
 // Conditional moves on %xcc.
 let Uses = [ICC], Constraints = "$f = $rd" in {

diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 595670735c74b..c7c643b57a51a 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -140,10 +140,25 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
   llvm_unreachable("Invalid cond code");
 }
 
-static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }
+static bool isUncondBranchOpcode(int Opc) {
+  return Opc == SP::BA || Opc == SP::BPA;
+}
+
+static bool isI32CondBranchOpcode(int Opc) {
+  return Opc == SP::BCOND || Opc == SP::BPICC || Opc == SP::BPICCA ||
+         Opc == SP::BPICCNT || Opc == SP::BPICCANT;
+}
+
+static bool isI64CondBranchOpcode(int Opc) {
+  return Opc == SP::BPXCC || Opc == SP::BPXCCA || Opc == SP::BPXCCNT ||
+         Opc == SP::BPXCCANT;
+}
+
+static bool isFCondBranchOpcode(int Opc) { return Opc == SP::FBCOND; }
 
 static bool isCondBranchOpcode(int Opc) {
-  return Opc == SP::FBCOND || Opc == SP::BCOND;
+  return isI32CondBranchOpcode(Opc) || isI64CondBranchOpcode(Opc) ||
+         isFCondBranchOpcode(Opc);
 }
 
 static bool isIndirectBranchOpcode(int Opc) {
@@ -152,7 +167,14 @@ static bool isIndirectBranchOpcode(int Opc) {
 
 static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                             SmallVectorImpl<MachineOperand> &Cond) {
-  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(1).getImm()));
+  unsigned Opc = LastInst->getOpcode();
+  int64_t CC = LastInst->getOperand(1).getImm();
+
+  // Push the branch opcode into Cond too so later in insertBranch
+  // it can use the information to emit the correct SPARC branch opcode.
+  Cond.push_back(MachineOperand::CreateImm(Opc));
+  Cond.push_back(MachineOperand::CreateImm(CC));
+
   Target = LastInst->getOperand(0).getMBB();
 }
 
@@ -246,27 +268,29 @@ unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                       const DebugLoc &DL,
                                       int *BytesAdded) const {
   assert(TBB && "insertBranch must not be told to insert a fallthrough");
-  assert((Cond.size() == 1 || Cond.size() == 0) &&
-         "Sparc branch conditions should have one component!");
+  assert((Cond.size() <= 2) &&
+         "Sparc branch conditions should have at most two components!");
   assert(!BytesAdded && "code size not handled");
 
   if (Cond.empty()) {
     assert(!FBB && "Unconditional branch with multiple successors!");
-    BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
+    BuildMI(&MBB, DL, get(Subtarget.isV9() ? SP::BPA : SP::BA)).addMBB(TBB);
     return 1;
   }
 
   // Conditional branch
-  unsigned CC = Cond[0].getImm();
+  unsigned Opc = Cond[0].getImm();
+  unsigned CC = Cond[1].getImm();
 
-  if (IsIntegerCC(CC))
-    BuildMI(&MBB, DL, get(SP::BCOND)).addMBB(TBB).addImm(CC);
-  else
+  if (IsIntegerCC(CC)) {
+    BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);
+  } else {
     BuildMI(&MBB, DL, get(SP::FBCOND)).addMBB(TBB).addImm(CC);
+  }
   if (!FBB)
     return 1;
 
-  BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
+  BuildMI(&MBB, DL, get(Subtarget.isV9() ? SP::BPA : SP::BA)).addMBB(FBB);
   return 2;
 }
 
@@ -282,9 +306,8 @@ unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
     if (I->isDebugInstr())
       continue;
 
-    if (I->getOpcode() != SP::BA
-        && I->getOpcode() != SP::BCOND
-        && I->getOpcode() != SP::FBCOND)
+    if (!isCondBranchOpcode(I->getOpcode()) &&
+        !isUncondBranchOpcode(I->getOpcode()))
       break; // Not a branch
 
     I->eraseFromParent();
@@ -296,9 +319,9 @@ unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
 
 bool SparcInstrInfo::reverseBranchCondition(
     SmallVectorImpl<MachineOperand> &Cond) const {
-  assert(Cond.size() == 1);
-  SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[0].getImm());
-  Cond[0].setImm(GetOppositeBranchCondition(CC));
+  assert(Cond.size() <= 2);
+  SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[1].getImm());
+  Cond[1].setImm(GetOppositeBranchCondition(CC));
   return false;
 }
 

diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td
index baf866bde589d..c06535f6692cd 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -242,7 +242,8 @@ SDTypeProfile<1, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
 def SPcmpicc : SDNode<"SPISD::CMPICC", SDTSPcmpicc, [SDNPOutGlue]>;
 def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutGlue]>;
 def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
-def SPbrxcc : SDNode<"SPISD::BRXCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+def SPbpicc : SDNode<"SPISD::BPICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+def SPbpxcc : SDNode<"SPISD::BPXCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
 def SPbrfcc : SDNode<"SPISD::BRFCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
 
 def SPhi    : SDNode<"SPISD::Hi", SDTIntUnaryOp>;
@@ -845,19 +846,27 @@ defm SAVE    : F3_12np<"save"   , 0b111100>;
 defm RESTORE : F3_12np<"restore", 0b111101>;
 
 // Section B.21 - Branch on Integer Condition Codes Instructions, p. 119
+// Section A.7 - Branch on Integer Condition Codes with Prediction (SPARC v9)
 
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1 in {
 // unconditional branch class.
 class BranchAlways<dag ins, string asmstr, list<dag> pattern>
-  : F2_2<0b010, 0, (outs), ins, asmstr, pattern> {
-  let isBranch     = 1;
-  let isTerminator = 1;
-  let hasDelaySlot = 1;
-  let isBarrier    = 1;
+  : F2_2<0b010, 0, (outs), ins, asmstr, pattern>;
+
+// Same as BranchAlways but uses the new v9 encoding
+class BranchPredictAlways<dag ins, string asmstr, list<dag> pattern>
+  : F2_3<0b001, 0, 1, (outs), ins, asmstr, pattern>;
 }
 
-let cond = 8 in
-  def BA : BranchAlways<(ins brtarget:$imm22), "ba $imm22", [(br bb:$imm22)]>;
+let cond = 8 in {
+  // If we're compiling for v9, prefer BPA rather than BA
+  // TODO: Disallow BA emission when FeatureV8Deprecated isn't enabled
+  let Predicates = [HasV9], cc = 0b00 in
+    def BPA : BranchPredictAlways<(ins bprtarget:$imm19),
+      "ba %icc, $imm19", [(br bb:$imm19)]>;
 
+  def BA : BranchAlways<(ins brtarget:$imm22), "ba $imm22", [(br bb:$imm22)]>;
+}
 
 let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
 
@@ -913,7 +922,7 @@ let Uses = [ICC] in {
                          "b$cond,a $imm22", []>;
 
   let Predicates = [HasV9], cc = 0b00 in
-    defm BPI : IPredBranch<"%icc", []>;
+    defm BPI : IPredBranch<"%icc", [(SPbpicc bb:$imm19, imm:$cond)]>;
 }
 
 // Section B.22 - Branch on Floating-point Condition Codes Instructions, p. 121

diff --git a/llvm/test/CodeGen/SPARC/atomics.ll b/llvm/test/CodeGen/SPARC/atomics.ll
index c6ecbaf911dd2..61cd5258b0f72 100644
--- a/llvm/test/CodeGen/SPARC/atomics.ll
+++ b/llvm/test/CodeGen/SPARC/atomics.ll
@@ -1,13 +1,20 @@
-; RUN: llc < %s -march=sparc -mcpu=v9 -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -march=sparc -mcpu=v9 -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC
+; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s --check-prefixes=SPARC64
 
-; CHECK-LABEL: test_atomic_i8
-; CHECK:       ldub [%o0]
-; CHECK:       membar
-; CHECK:       ldub [%o1]
-; CHECK:       membar
-; CHECK:       membar
-; CHECK:       stb {{.+}}, [%o2]
+; SPARC-LABEL: test_atomic_i8
+; SPARC:       ldub [%o0]
+; SPARC:       membar
+; SPARC:       ldub [%o1]
+; SPARC:       membar
+; SPARC:       membar
+; SPARC:       stb {{.+}}, [%o2]
+; SPARC64-LABEL: test_atomic_i8
+; SPARC64:       ldub [%o0]
+; SPARC64:       membar
+; SPARC64:       ldub [%o1]
+; SPARC64:       membar
+; SPARC64:       membar
+; SPARC64:       stb {{.+}}, [%o2]
 define i8 @test_atomic_i8(i8* %ptr1, i8* %ptr2, i8* %ptr3) {
 entry:
   %0 = load atomic i8, i8* %ptr1 acquire, align 1
@@ -17,13 +24,20 @@ entry:
   ret i8 %2
 }
 
-; CHECK-LABEL: test_atomic_i16
-; CHECK:       lduh [%o0]
-; CHECK:       membar
-; CHECK:       lduh [%o1]
-; CHECK:       membar
-; CHECK:       membar
-; CHECK:       sth {{.+}}, [%o2]
+; SPARC-LABEL: test_atomic_i16
+; SPARC:       lduh [%o0]
+; SPARC:       membar
+; SPARC:       lduh [%o1]
+; SPARC:       membar
+; SPARC:       membar
+; SPARC:       sth {{.+}}, [%o2]
+; SPARC64-LABEL: test_atomic_i16
+; SPARC64:       lduh [%o0]
+; SPARC64:       membar
+; SPARC64:       lduh [%o1]
+; SPARC64:       membar
+; SPARC64:       membar
+; SPARC64:       sth {{.+}}, [%o2]
 define i16 @test_atomic_i16(i16* %ptr1, i16* %ptr2, i16* %ptr3) {
 entry:
   %0 = load atomic i16, i16* %ptr1 acquire, align 2
@@ -33,13 +47,20 @@ entry:
   ret i16 %2
 }
 
-; CHECK-LABEL: test_atomic_i32
-; CHECK:       ld [%o0]
-; CHECK:       membar
-; CHECK:       ld [%o1]
-; CHECK:       membar
-; CHECK:       membar
-; CHECK:       st {{.+}}, [%o2]
+; SPARC-LABEL: test_atomic_i32
+; SPARC:       ld [%o0]
+; SPARC:       membar
+; SPARC:       ld [%o1]
+; SPARC:       membar
+; SPARC:       membar
+; SPARC:       st {{.+}}, [%o2]
+; SPARC64-LABEL: test_atomic_i32
+; SPARC64:       ld [%o0]
+; SPARC64:       membar
+; SPARC64:       ld [%o1]
+; SPARC64:       membar
+; SPARC64:       membar
+; SPARC64:       st {{.+}}, [%o2]
 define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
 entry:
   %0 = load atomic i32, i32* %ptr1 acquire, align 4
@@ -53,38 +74,70 @@ entry:
 ;; redundant here. There's something weird happening in optimization
 ;; of the success value of cmpxchg.
 
-; CHECK-LABEL: test_cmpxchg_i8
-; CHECK:       and %o1, -4, %o2
-; CHECK:       mov  3, %o3
-; CHECK:       andn %o3, %o1, %o1
-; CHECK:       sll %o1, 3, %o1
-; CHECK:       mov  255, %o3
-; CHECK:       sll %o3, %o1, %o5
-; CHECK:       xor %o5, -1, %o3
-; CHECK:       mov  123, %o4
-; CHECK:       ld [%o2], %g2
-; CHECK:       sll %o4, %o1, %o4
-; CHECK:       and %o0, 255, %o0
-; CHECK:       sll %o0, %o1, %o0
-; CHECK:       andn %g2, %o5, %g2
-; CHECK:       mov %g0, %o5
-; CHECK:      [[LABEL1:\.L.*]]:
-; CHECK:       or %g2, %o4, %g3
-; CHECK:       or %g2, %o0, %g4
-; CHECK:       cas [%o2], %g4, %g3
-; CHECK:       cmp %g3, %g4
-; CHECK:       mov  %o5, %g4
-; CHECK:       move %icc, 1, %g4
-; CHECK:       cmp %g4, 0
-; CHECK:       bne  [[LABEL2:\.L.*]]
-; CHECK:       nop
-; CHECK:       and %g3, %o3, %g4
-; CHECK:       cmp %g2, %g4
-; CHECK:       bne  [[LABEL1]]
-; CHECK:       mov  %g4, %g2
-; CHECK:      [[LABEL2]]:
-; CHECK:       retl
-; CHECK:       srl %g3, %o1, %o0
+; SPARC-LABEL: test_cmpxchg_i8
+; SPARC:       and %o1, -4, %o2
+; SPARC:       mov  3, %o3
+; SPARC:       andn %o3, %o1, %o1
+; SPARC:       sll %o1, 3, %o1
+; SPARC:       mov  255, %o3
+; SPARC:       sll %o3, %o1, %o5
+; SPARC:       xor %o5, -1, %o3
+; SPARC:       mov  123, %o4
+; SPARC:       ld [%o2], %g2
+; SPARC:       sll %o4, %o1, %o4
+; SPARC:       and %o0, 255, %o0
+; SPARC:       sll %o0, %o1, %o0
+; SPARC:       andn %g2, %o5, %g2
+; SPARC:       mov %g0, %o5
+; SPARC:      [[LABEL1:\.L.*]]:
+; SPARC:       or %g2, %o4, %g3
+; SPARC:       or %g2, %o0, %g4
+; SPARC:       cas [%o2], %g4, %g3
+; SPARC:       cmp %g3, %g4
+; SPARC:       mov  %o5, %g4
+; SPARC:       move %icc, 1, %g4
+; SPARC:       cmp %g4, 0
+; SPARC:       bne %icc, [[LABEL2:\.L.*]]
+; SPARC:       nop
+; SPARC:       and %g3, %o3, %g4
+; SPARC:       cmp %g2, %g4
+; SPARC:       bne %icc, [[LABEL1]]
+; SPARC:       mov  %g4, %g2
+; SPARC:      [[LABEL2]]:
+; SPARC:       retl
+; SPARC:       srl %g3, %o1, %o0
+; SPARC64-LABEL: test_cmpxchg_i8
+; SPARC64:       and %o1, -4, %o2
+; SPARC64:       mov  3, %o3
+; SPARC64:       andn %o3, %o1, %o1
+; SPARC64:       sll %o1, 3, %o1
+; SPARC64:       mov  255, %o3
+; SPARC64:       sll %o3, %o1, %o5
+; SPARC64:       xor %o5, -1, %o3
+; SPARC64:       mov  123, %o4
+; SPARC64:       ld [%o2], %g2
+; SPARC64:       sll %o4, %o1, %o4
+; SPARC64:       and %o0, 255, %o0
+; SPARC64:       sll %o0, %o1, %o0
+; SPARC64:       andn %g2, %o5, %g2
+; SPARC64:       mov %g0, %o5
+; SPARC64:      [[LABEL1:\.L.*]]:
+; SPARC64:       or %g2, %o4, %g3
+; SPARC64:       or %g2, %o0, %g4
+; SPARC64:       cas [%o2], %g4, %g3
+; SPARC64:       cmp %g3, %g4
+; SPARC64:       mov  %o5, %g4
+; SPARC64:       move %icc, 1, %g4
+; SPARC64:       cmp %g4, 0
+; SPARC64:       bne %icc, [[LABEL2:\.L.*]]
+; SPARC64:       nop
+; SPARC64:       and %g3, %o3, %g4
+; SPARC64:       cmp %g2, %g4
+; SPARC64:       bne %icc, [[LABEL1]]
+; SPARC64:       mov  %g4, %g2
+; SPARC64:      [[LABEL2]]:
+; SPARC64:       retl
+; SPARC64:       srl %g3, %o1, %o0
 define i8 @test_cmpxchg_i8(i8 %a, i8* %ptr) {
 entry:
   %pair = cmpxchg i8* %ptr, i8 %a, i8 123 monotonic monotonic
@@ -92,40 +145,72 @@ entry:
   ret i8 %b
 }
 
-; CHECK-LABEL: test_cmpxchg_i16
+; SPARC-LABEL: test_cmpxchg_i16
 
-; CHECK:       and %o1, -4, %o2
-; CHECK:       and %o1, 3, %o1
-; CHECK:       xor %o1, 2, %o1
-; CHECK:       sll %o1, 3, %o1
-; CHECK:       sethi 63, %o3
-; CHECK:       or %o3, 1023, %o4
-; CHECK:       sll %o4, %o1, %o5
-; CHECK:       xor %o5, -1, %o3
-; CHECK:       and %o0, %o4, %o4
-; CHECK:       ld [%o2], %g2
-; CHECK:       mov  123, %o0
-; CHECK:       sll %o0, %o1, %o0
-; CHECK:       sll %o4, %o1, %o4
-; CHECK:       andn %g2, %o5, %g2
-; CHECK:       mov %g0, %o5
-; CHECK:      [[LABEL1:\.L.*]]:
-; CHECK:       or %g2, %o0, %g3
-; CHECK:       or %g2, %o4, %g4
-; CHECK:       cas [%o2], %g4, %g3
-; CHECK:       cmp %g3, %g4
-; CHECK:       mov  %o5, %g4
-; CHECK:       move %icc, 1, %g4
-; CHECK:       cmp %g4, 0
-; CHECK:       bne  [[LABEL2:\.L.*]]
-; CHECK:       nop
-; CHECK:       and %g3, %o3, %g4
-; CHECK:       cmp %g2, %g4
-; CHECK:       bne  [[LABEL1]]
-; CHECK:       mov  %g4, %g2
-; CHECK:      [[LABEL2]]:
-; CHECK:       retl
-; CHECK:       srl %g3, %o1, %o0
+; SPARC:       and %o1, -4, %o2
+; SPARC:       and %o1, 3, %o1
+; SPARC:       xor %o1, 2, %o1
+; SPARC:       sll %o1, 3, %o1
+; SPARC:       sethi 63, %o3
+; SPARC:       or %o3, 1023, %o4
+; SPARC:       sll %o4, %o1, %o5
+; SPARC:       xor %o5, -1, %o3
+; SPARC:       and %o0, %o4, %o4
+; SPARC:       ld [%o2], %g2
+; SPARC:       mov  123, %o0
+; SPARC:       sll %o0, %o1, %o0
+; SPARC:       sll %o4, %o1, %o4
+; SPARC:       andn %g2, %o5, %g2
+; SPARC:       mov %g0, %o5
+; SPARC:      [[LABEL1:\.L.*]]:
+; SPARC:       or %g2, %o0, %g3
+; SPARC:       or %g2, %o4, %g4
+; SPARC:       cas [%o2], %g4, %g3
+; SPARC:       cmp %g3, %g4
+; SPARC:       mov  %o5, %g4
+; SPARC:       move %icc, 1, %g4
+; SPARC:       cmp %g4, 0
+; SPARC:       bne %icc, [[LABEL2:\.L.*]]
+; SPARC:       nop
+; SPARC:       and %g3, %o3, %g4
+; SPARC:       cmp %g2, %g4
+; SPARC:       bne %icc, [[LABEL1]]
+; SPARC:       mov  %g4, %g2
+; SPARC:      [[LABEL2]]:
+; SPARC:       retl
+; SPARC:       srl %g3, %o1, %o0
+; SPARC64:       and %o1, -4, %o2
+; SPARC64:       and %o1, 3, %o1
+; SPARC64:       xor %o1, 2, %o1
+; SPARC64:       sll %o1, 3, %o1
+; SPARC64:       sethi 63, %o3
+; SPARC64:       or %o3, 1023, %o4
+; SPARC64:       sll %o4, %o1, %o5
+; SPARC64:       xor %o5, -1, %o3
+; SPARC64:       and %o0, %o4, %o4
+; SPARC64:       ld [%o2], %g2
+; SPARC64:       mov  123, %o0
+; SPARC64:       sll %o0, %o1, %o0
+; SPARC64:       sll %o4, %o1, %o4
+; SPARC64:       andn %g2, %o5, %g2
+; SPARC64:       mov %g0, %o5
+; SPARC64:      [[LABEL1:\.L.*]]:
+; SPARC64:       or %g2, %o0, %g3
+; SPARC64:       or %g2, %o4, %g4
+; SPARC64:       cas [%o2], %g4, %g3
+; SPARC64:       cmp %g3, %g4
+; SPARC64:       mov  %o5, %g4
+; SPARC64:       move %icc, 1, %g4
+; SPARC64:       cmp %g4, 0
+; SPARC64:       bne %icc, [[LABEL2:\.L.*]]
+; SPARC64:       nop
+; SPARC64:       and %g3, %o3, %g4
+; SPARC64:       cmp %g2, %g4
+; SPARC64:       bne %icc, [[LABEL1]]
+; SPARC64:       mov  %g4, %g2
+; SPARC64:      [[LABEL2]]:
+; SPARC64:       retl
+; SPARC64:       srl %g3, %o1, %o0
 define i16 @test_cmpxchg_i16(i16 %a, i16* %ptr) {
 entry:
   %pair = cmpxchg i16* %ptr, i16 %a, i16 123 monotonic monotonic
@@ -133,10 +218,12 @@ entry:
   ret i16 %b
 }
 
-; CHECK-LABEL: test_cmpxchg_i32
-; CHECK:       mov 123, [[R:%[gilo][0-7]]]
-; CHECK:       cas [%o1], %o0, [[R]]
-
+; SPARC-LABEL: test_cmpxchg_i32
+; SPARC:       mov 123, [[R:%[gilo][0-7]]]
+; SPARC:       cas [%o1], %o0, [[R]]
+; SPARC64-LABEL: test_cmpxchg_i32
+; SPARC64:       mov 123, [[R:%[gilo][0-7]]]
+; SPARC64:       cas [%o1], %o0, [[R]]
 define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
 entry:
   %pair = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
@@ -144,114 +231,162 @@ entry:
   ret i32 %b
 }
 
-; CHECK-LABEL: test_swap_i8
-; CHECK:       mov 42, [[R:%[gilo][0-7]]]
-; CHECK:       cas
-
+; SPARC-LABEL: test_swap_i8
+; SPARC:       mov 42, [[R:%[gilo][0-7]]]
+; SPARC:       cas
+; SPARC64-LABEL: test_swap_i8
+; SPARC64:       mov 42, [[R:%[gilo][0-7]]]
+; SPARC64:       cas
 define i8 @test_swap_i8(i8 %a, i8* %ptr) {
 entry:
   %b = atomicrmw xchg i8* %ptr, i8 42 monotonic
   ret i8 %b
 }
 
-; CHECK-LABEL: test_swap_i16
-; CHECK:       mov 42, [[R:%[gilo][0-7]]]
-; CHECK:       cas
-
+; SPARC-LABEL: test_swap_i16
+; SPARC:       mov 42, [[R:%[gilo][0-7]]]
+; SPARC:       cas
+; SPARC64-LABEL: test_swap_i16
+; SPARC64:       mov 42, [[R:%[gilo][0-7]]]
+; SPARC64:       cas
 define i16 @test_swap_i16(i16 %a, i16* %ptr) {
 entry:
   %b = atomicrmw xchg i16* %ptr, i16 42 monotonic
   ret i16 %b
 }
 
-; CHECK-LABEL: test_swap_i32
-; CHECK:       mov 42, [[R:%[gilo][0-7]]]
-; CHECK:       swap [%o1], [[R]]
-
+; SPARC-LABEL: test_swap_i32
+; SPARC:       mov 42, [[R:%[gilo][0-7]]]
+; SPARC:       swap [%o1], [[R]]
+; SPARC64-LABEL: test_swap_i32
+; SPARC64:       mov 42, [[R:%[gilo][0-7]]]
+; SPARC64:       swap [%o1], [[R]]
 define i32 @test_swap_i32(i32 %a, i32* %ptr) {
 entry:
   %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
   ret i32 %b
 }
 
-; CHECK-LABEL: test_load_sub_i8
-; CHECK: membar
-; CHECK: .L{{.*}}:
-; CHECK: sub
-; CHECK: cas [{{%[gilo][0-7]}}]
-; CHECK: membar
+; SPARC-LABEL: test_load_sub_i8
+; SPARC: membar
+; SPARC: .L{{.*}}:
+; SPARC: sub
+; SPARC: cas [{{%[gilo][0-7]}}]
+; SPARC: membar
+; SPARC64-LABEL: test_load_sub_i8
+; SPARC64: membar
+; SPARC64: .L{{.*}}:
+; SPARC64: sub
+; SPARC64: cas [{{%[gilo][0-7]}}]
+; SPARC64: membar
 define zeroext i8 @test_load_sub_i8(i8* %p, i8 zeroext %v) {
 entry:
   %0 = atomicrmw sub i8* %p, i8 %v seq_cst
   ret i8 %0
 }
 
-; CHECK-LABEL: test_load_sub_i16
-; CHECK: membar
-; CHECK: .L{{.*}}:
-; CHECK: sub
-; CHECK: cas [{{%[gilo][0-7]}}]
-; CHECK: membar
+; SPARC-LABEL: test_load_sub_i16
+; SPARC: membar
+; SPARC: .L{{.*}}:
+; SPARC: sub
+; SPARC: cas [{{%[gilo][0-7]}}]
+; SPARC: membar
+; SPARC64-LABEL: test_load_sub_i16
+; SPARC64: membar
+; SPARC64: .L{{.*}}:
+; SPARC64: sub
+; SPARC64: cas [{{%[gilo][0-7]}}]
+; SPARC64: membar
 define zeroext i16 @test_load_sub_i16(i16* %p, i16 zeroext %v) {
 entry:
   %0 = atomicrmw sub i16* %p, i16 %v seq_cst
   ret i16 %0
 }
 
-; CHECK-LABEL: test_load_add_i32
-; CHECK: membar
-; CHECK: mov %g0
-; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
-; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
-; CHECK: cas [%o0], [[V]], [[V2]]
-; CHECK: membar
+; SPARC-LABEL: test_load_add_i32
+; SPARC: membar
+; SPARC: mov %g0
+; SPARC: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
+; SPARC: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
+; SPARC: cas [%o0], [[V]], [[V2]]
+; SPARC: membar
+; SPARC64-LABEL: test_load_add_i32
+; SPARC64: membar
+; SPARC64: mov %g0
+; SPARC64: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
+; SPARC64: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
+; SPARC64: cas [%o0], [[V]], [[V2]]
+; SPARC64: membar
 define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
 entry:
   %0 = atomicrmw add i32* %p, i32 %v seq_cst
   ret i32 %0
 }
 
-; CHECK-LABEL: test_load_xor_32
-; CHECK: membar
-; CHECK: xor
-; CHECK: cas [%o0]
-; CHECK: membar
+; SPARC-LABEL: test_load_xor_32
+; SPARC: membar
+; SPARC: xor
+; SPARC: cas [%o0]
+; SPARC: membar
+; SPARC64-LABEL: test_load_xor_32
+; SPARC64: membar
+; SPARC64: xor
+; SPARC64: cas [%o0]
+; SPARC64: membar
 define zeroext i32 @test_load_xor_32(i32* %p, i32 zeroext %v) {
 entry:
   %0 = atomicrmw xor i32* %p, i32 %v seq_cst
   ret i32 %0
 }
 
-; CHECK-LABEL: test_load_and_32
-; CHECK: membar
-; CHECK: and
-; CHECK-NOT: xor
-; CHECK: cas [%o0]
-; CHECK: membar
+; SPARC-LABEL: test_load_and_32
+; SPARC: membar
+; SPARC: and
+; SPARC-NOT: xor
+; SPARC: cas [%o0]
+; SPARC: membar
+; SPARC64-LABEL: test_load_and_32
+; SPARC64: membar
+; SPARC64: and
+; SPARC64-NOT: xor
+; SPARC64: cas [%o0]
+; SPARC64: membar
 define zeroext i32 @test_load_and_32(i32* %p, i32 zeroext %v) {
 entry:
   %0 = atomicrmw and i32* %p, i32 %v seq_cst
   ret i32 %0
 }
 
-; CHECK-LABEL: test_load_nand_32
-; CHECK: membar
-; CHECK: and
-; CHECK: xor
-; CHECK: cas [%o0]
-; CHECK: membar
+; SPARC-LABEL: test_load_nand_32
+; SPARC: membar
+; SPARC: and
+; SPARC: xor
+; SPARC: cas [%o0]
+; SPARC: membar
+; SPARC64-LABEL: test_load_nand_32
+; SPARC64: membar
+; SPARC64: and
+; SPARC64: xor
+; SPARC64: cas [%o0]
+; SPARC64: membar
 define zeroext i32 @test_load_nand_32(i32* %p, i32 zeroext %v) {
 entry:
   %0 = atomicrmw nand i32* %p, i32 %v seq_cst
   ret i32 %0
 }
 
-; CHECK-LABEL: test_load_umin_32
-; CHECK: membar
-; CHECK: cmp
-; CHECK: movleu %icc
-; CHECK: cas [%o0]
-; CHECK: membar
+; SPARC-LABEL: test_load_umin_32
+; SPARC: membar
+; SPARC: cmp
+; SPARC: movleu %icc
+; SPARC: cas [%o0]
+; SPARC: membar
+; SPARC64-LABEL: test_load_umin_32
+; SPARC64: membar
+; SPARC64: cmp
+; SPARC64: movleu %icc
+; SPARC64: cas [%o0]
+; SPARC64: membar
 define zeroext i32 @test_load_umin_32(i32* %p, i32 zeroext %v) {
 entry:
   %0 = atomicrmw umin i32* %p, i32 %v seq_cst

diff --git a/llvm/test/CodeGen/SPARC/branches-v9.ll b/llvm/test/CodeGen/SPARC/branches-v9.ll
new file mode 100644
index 0000000000000..b1315407194dd
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/branches-v9.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=sparcv9 -disable-sparc-leaf-proc | FileCheck %s
+
+;; 1. When emitting code for v9, branches should always explicitly specify
+;;    %icc or %xcc.
+;; 2. There should never be a `ba` that jumps into two instructions immediately
+;;    following it.
+
+define void @i(i32 signext %sel) {
+; CHECK-LABEL: i:
+; CHECK:         .cfi_startproc
+; CHECK-NEXT:  ! %bb.0: ! %entry
+; CHECK-NEXT:    save %sp, -176, %sp
+; CHECK-NEXT:    .cfi_def_cfa_register %fp
+; CHECK-NEXT:    .cfi_window_save
+; CHECK-NEXT:    .cfi_register %o7, %i7
+; CHECK-NEXT:    cmp %i0, 0
+; CHECK-NEXT:    be %icc, .LBB0_2
+; CHECK-NEXT:    nop
+; CHECK-NEXT:  ! %bb.1: ! %fbb
+; CHECK-NEXT:    call f2
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    ba %icc, .LBB0_3
+; CHECK-NEXT:    nop
+; CHECK-NEXT:  .LBB0_2: ! %tbb
+; CHECK-NEXT:    call f1
+; CHECK-NEXT:    nop
+; CHECK-NEXT:  .LBB0_3: ! %end
+; CHECK-NEXT:    call f3
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    restore
+entry:
+  %cond = icmp eq i32 %sel, 0
+  br i1 %cond, label %tbb, label %fbb
+
+fbb:
+  call void @f2()
+  br label %end
+
+tbb:
+  call void @f1()
+  br label %end
+
+end:
+  call void @f3()
+  ret void
+}
+
+define void @l(i64 %sel) {
+; CHECK-LABEL: l:
+; CHECK:         .cfi_startproc
+; CHECK-NEXT:  ! %bb.0: ! %entry
+; CHECK-NEXT:    save %sp, -176, %sp
+; CHECK-NEXT:    .cfi_def_cfa_register %fp
+; CHECK-NEXT:    .cfi_window_save
+; CHECK-NEXT:    .cfi_register %o7, %i7
+; CHECK-NEXT:    cmp %i0, 0
+; CHECK-NEXT:    be %xcc, .LBB1_2
+; CHECK-NEXT:    nop
+; CHECK-NEXT:  ! %bb.1: ! %fbb
+; CHECK-NEXT:    call f2
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    ba %icc, .LBB1_3
+; CHECK-NEXT:    nop
+; CHECK-NEXT:  .LBB1_2: ! %tbb
+; CHECK-NEXT:    call f1
+; CHECK-NEXT:    nop
+; CHECK-NEXT:  .LBB1_3: ! %end
+; CHECK-NEXT:    call f3
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    restore
+entry:
+  %cond = icmp eq i64 %sel, 0
+  br i1 %cond, label %tbb, label %fbb
+
+fbb:
+  call void @f2()
+  br label %end
+
+tbb:
+  call void @f1()
+  br label %end
+
+end:
+  call void @f3()
+  ret void
+}
+
+declare void @f1(...)
+
+declare void @f2(...)
+
+declare void @f3(...)
+

diff --git a/llvm/test/CodeGen/SPARC/hard-quad-float.ll b/llvm/test/CodeGen/SPARC/hard-quad-float.ll
index 7bb94583af147..d2ca374cf2cc6 100644
--- a/llvm/test/CodeGen/SPARC/hard-quad-float.ll
+++ b/llvm/test/CodeGen/SPARC/hard-quad-float.ll
@@ -8,7 +8,7 @@ define fp128 @fpselect_softfloat(i32 signext %0, fp128 %1, fp128 %2) #0 {
 ; SPARC64-NEXT:    cmp %o0, 0
 ; SPARC64-NEXT:    fmovd %f8, %f0
 ; SPARC64-NEXT:    fmovd %f10, %f2
-; SPARC64-NEXT:    be .LBB0_2
+; SPARC64-NEXT:    be %icc, .LBB0_2
 ; SPARC64-NEXT:    nop
 ; SPARC64-NEXT:  ! %bb.1:
 ; SPARC64-NEXT:    fmovd %f4, %f0

diff --git a/llvm/test/CodeGen/SPARC/missinglabel.ll b/llvm/test/CodeGen/SPARC/missinglabel.ll
index 792af8eec3bdf..75bbe976152fd 100644
--- a/llvm/test/CodeGen/SPARC/missinglabel.ll
+++ b/llvm/test/CodeGen/SPARC/missinglabel.ll
@@ -11,13 +11,11 @@ define void @f(i64 %a0) align 2 {
 ; CHECK-NEXT:    cmp %o0, 0
 ; CHECK-NEXT:    be %xcc, .LBB0_2
 ; CHECK-NEXT:    nop
-; CHECK-NEXT:    ba .LBB0_1
-; CHECK-NEXT:    nop
-; CHECK-NEXT:  .LBB0_1: ! %cond.false
+; CHECK-NEXT:  ! %bb.1: ! %cond.false
 ; CHECK-NEXT:  .LBB0_2: ! %targetblock
 ; CHECK-NEXT:    mov %g0, %o0
 ; CHECK-NEXT:    cmp %o0, 0
-; CHECK-NEXT:    bne .LBB0_4
+; CHECK-NEXT:    bne %icc, .LBB0_4
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:  ! %bb.3: ! %cond.false.i83
 ; CHECK-NEXT:  .LBB0_4: ! %exit.i85


        

