[llvm] ce92b2f - [Xtensa] Implement lowering SELECT_CC, SETCC. (#97017)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 11 00:46:28 PDT 2024


Author: Andrei Safronov
Date: 2024-07-11T09:46:24+02:00
New Revision: ce92b2f594809c39c37bf3ef3d881f2b33bd6730

URL: https://github.com/llvm/llvm-project/commit/ce92b2f594809c39c37bf3ef3d881f2b33bd6730
DIFF: https://github.com/llvm/llvm-project/commit/ce92b2f594809c39c37bf3ef3d881f2b33bd6730.diff

LOG: [Xtensa] Implement lowering SELECT_CC, SETCC. (#97017)
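
SELECT_CC on i32 is custom-lowered to a new XtensaISD::SELECT_CC node,
while i32 SELECT and SETCC are marked Expand so that both forms funnel
into the same path, and getSetCCResultType() now returns i32 for scalar
types. The GT/LE/UGT/ULE condition codes are marked Expand, which makes
the old operand-swapping TableGen brcc patterns redundant.
LowerSELECT_CC() encodes the branch opcode chosen by getBranchOpcode()
(BEQ/BNE/BLT/BGE/BLTU/BGEU) as an operand of the node, and the resulting
SELECT pseudo is expanded after instruction selection in emitSelectCC()
into a compare-and-branch diamond (MBB -> CopyMBB -> SinkMBB) with a PHI
in SinkMBB.

As a concrete example (taken verbatim from the new select-cc.ll test), a
scalar select such as

  define i32 @f_slt(i32 %a, ptr %b) nounwind {
    %val1 = load i32, ptr %b
    %tst1 = icmp slt i32 %a, %val1
    %val2 = select i1 %tst1, i32 %a, i32 %val1
    ret i32 %val2
  }

now lowers to a conditional branch over a register copy:

  f_slt:
          l32i a8, a3, 0
          blt a2, a8, .LBB8_2
  # %bb.1:
          or a2, a8, a8
  .LBB8_2:
          ret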

Added: 
    llvm/test/CodeGen/Xtensa/select-cc.ll
    llvm/test/CodeGen/Xtensa/setcc.ll

Modified: 
    llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
    llvm/lib/Target/Xtensa/XtensaISelLowering.h
    llvm/lib/Target/Xtensa/XtensaInstrInfo.td
    llvm/lib/Target/Xtensa/XtensaOperators.td
    llvm/test/CodeGen/Xtensa/brcc.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
index 6509793012504..5d5a34157cc9f 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
@@ -85,11 +85,19 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
   // indirect jump.
   setOperationAction(ISD::BR_JT, MVT::Other, Custom);
 
-  setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32);
   setOperationAction(ISD::BR_CC, MVT::i32, Legal);
   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
   setOperationAction(ISD::BR_CC, MVT::f32, Expand);
 
+  setOperationAction(ISD::SELECT, MVT::i32, Expand);
+  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+  setOperationAction(ISD::SETCC, MVT::i32, Expand);
+
+  setCondCodeAction(ISD::SETGT, MVT::i32, Expand);
+  setCondCodeAction(ISD::SETLE, MVT::i32, Expand);
+  setCondCodeAction(ISD::SETUGT, MVT::i32, Expand);
+  setCondCodeAction(ISD::SETULE, MVT::i32, Expand);
+
   // Implement custom stack allocations
   setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
   // Implement custom stack save and restore
@@ -514,6 +522,50 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
   return DAG.getNode(XtensaISD::RET, DL, MVT::Other, RetOps);
 }
 
+static unsigned getBranchOpcode(ISD::CondCode Cond) {
+  switch (Cond) {
+  case ISD::SETEQ:
+    return Xtensa::BEQ;
+  case ISD::SETNE:
+    return Xtensa::BNE;
+  case ISD::SETLT:
+    return Xtensa::BLT;
+  case ISD::SETLE:
+    return Xtensa::BGE;
+  case ISD::SETGT:
+    return Xtensa::BLT;
+  case ISD::SETGE:
+    return Xtensa::BGE;
+  case ISD::SETULT:
+    return Xtensa::BLTU;
+  case ISD::SETULE:
+    return Xtensa::BGEU;
+  case ISD::SETUGT:
+    return Xtensa::BLTU;
+  case ISD::SETUGE:
+    return Xtensa::BGEU;
+  default:
+    llvm_unreachable("Unknown branch kind");
+  }
+}
+
+SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op,
+                                             SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT Ty = Op.getOperand(0).getValueType();
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  SDValue TrueValue = Op.getOperand(2);
+  SDValue FalseValue = Op.getOperand(3);
+  ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(4))->get();
+
+  unsigned BrOpcode = getBranchOpcode(CC);
+  SDValue TargetCC = DAG.getConstant(BrOpcode, DL, MVT::i32);
+
+  return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
+                     FalseValue, TargetCC);
+}
+
 SDValue XtensaTargetLowering::LowerImmediate(SDValue Op,
                                              SelectionDAG &DAG) const {
   const ConstantSDNode *CN = cast<ConstantSDNode>(Op);
@@ -676,6 +728,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op,
     return LowerJumpTable(Op, DAG);
   case ISD::ConstantPool:
     return LowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
+  case ISD::SELECT_CC:
+    return LowerSELECT_CC(Op, DAG);
   case ISD::STACKSAVE:
     return LowerSTACKSAVE(Op, DAG);
   case ISD::STACKRESTORE:
@@ -697,6 +751,86 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
     return "XtensaISD::PCREL_WRAPPER";
   case XtensaISD::RET:
     return "XtensaISD::RET";
+  case XtensaISD::SELECT_CC:
+    return "XtensaISD::SELECT_CC";
   }
   return nullptr;
 }
+
+//===----------------------------------------------------------------------===//
+// Custom insertion
+//===----------------------------------------------------------------------===//
+
+MachineBasicBlock *
+XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
+                                   MachineBasicBlock *MBB) const {
+  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+  DebugLoc DL = MI.getDebugLoc();
+
+  MachineOperand &LHS = MI.getOperand(1);
+  MachineOperand &RHS = MI.getOperand(2);
+  MachineOperand &TrueValue = MI.getOperand(3);
+  MachineOperand &FalseValue = MI.getOperand(4);
+  unsigned BrKind = MI.getOperand(5).getImm();
+
+  // To "insert" a SELECT_CC instruction, we actually have to insert
+  // CopyMBB and SinkMBB  blocks and add branch to MBB. We build phi
+  // operation in SinkMBB like phi (TrueVakue,FalseValue), where TrueValue
+  // is passed from MMB and FalseValue is passed from CopyMBB.
+  //   MBB
+  //   |   \
+  //   |   CopyMBB
+  //   |   /
+  //   SinkMBB
+  // The incoming instruction knows the
+  // destination vreg to set, the condition code register to branch on, the
+  // true/false values to select between, and a branch opcode to use.
+  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+  MachineFunction::iterator It = ++MBB->getIterator();
+
+  MachineFunction *F = MBB->getParent();
+  MachineBasicBlock *CopyMBB = F->CreateMachineBasicBlock(LLVM_BB);
+  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+
+  F->insert(It, CopyMBB);
+  F->insert(It, SinkMBB);
+
+  // Transfer the remainder of MBB and its successor edges to SinkMBB.
+  SinkMBB->splice(SinkMBB->begin(), MBB,
+                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
+  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+  MBB->addSuccessor(CopyMBB);
+  MBB->addSuccessor(SinkMBB);
+
+  BuildMI(MBB, DL, TII.get(BrKind))
+      .addReg(LHS.getReg())
+      .addReg(RHS.getReg())
+      .addMBB(SinkMBB);
+
+  CopyMBB->addSuccessor(SinkMBB);
+
+  //  SinkMBB:
+  //   %Result = phi [ %FalseValue, CopyMBB ], [ %TrueValue, MBB ]
+  //  ...
+
+  BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII.get(Xtensa::PHI),
+          MI.getOperand(0).getReg())
+      .addReg(FalseValue.getReg())
+      .addMBB(CopyMBB)
+      .addReg(TrueValue.getReg())
+      .addMBB(MBB);
+
+  MI.eraseFromParent(); // The pseudo instruction is gone now.
+  return SinkMBB;
+}
+
+MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter(
+    MachineInstr &MI, MachineBasicBlock *MBB) const {
+  switch (MI.getOpcode()) {
+  case Xtensa::SELECT:
+    return emitSelectCC(MI, MBB);
+  default:
+    llvm_unreachable("Unexpected instr type to insert");
+  }
+}

diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h
index 23a0217daaa96..dd811ae9f3a77 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h
@@ -33,7 +33,13 @@ enum {
   // Wraps a TargetGlobalAddress that should be loaded using PC-relative
   // accesses.  Operand 0 is the address.
   PCREL_WRAPPER,
-  RET
+  RET,
+
+  // Select with condition operator - This selects between a true value and
+  // a false value (ops #2 and #3) based on the boolean result of comparing
+  // the lhs and rhs (ops #0 and #1) of a conditional expression with the
+  // condition code in op #4.
+  SELECT_CC,
 };
 }
 
@@ -44,6 +50,13 @@ class XtensaTargetLowering : public TargetLowering {
   explicit XtensaTargetLowering(const TargetMachine &TM,
                                 const XtensaSubtarget &STI);
 
+  EVT getSetCCResultType(const DataLayout &, LLVMContext &,
+                         EVT VT) const override {
+    if (!VT.isVector())
+      return MVT::i32;
+    return VT.changeVectorElementTypeToInteger();
+  }
+
   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
 
   const char *getTargetNodeName(unsigned Opcode) const override;
@@ -71,6 +84,10 @@ class XtensaTargetLowering : public TargetLowering {
 
   const XtensaSubtarget &getSubtarget() const { return Subtarget; }
 
+  MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr &MI,
+                              MachineBasicBlock *BB) const override;
+
 private:
   const XtensaSubtarget &Subtarget;
 
@@ -86,6 +103,8 @@ class XtensaTargetLowering : public TargetLowering {
 
   SDValue LowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
 
+  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+
   SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
@@ -95,6 +114,9 @@ class XtensaTargetLowering : public TargetLowering {
   SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const;
 
   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
+
+  MachineBasicBlock *emitSelectCC(MachineInstr &MI,
+                                  MachineBasicBlock *BB) const;
 };
 
 } // end namespace llvm

diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
index f68d20dcdd54a..fc134e794153b 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
@@ -416,15 +416,6 @@ def BBSI : RRI8_Inst<0x07, (outs),
   let imm8 = target;
 }
 
-def : Pat<(brcc SETGT, AR:$s, AR:$t, bb:$target),
-          (BLT AR:$t, AR:$s, bb:$target)>;
-def : Pat<(brcc SETUGT, AR:$s, AR:$t, bb:$target),
-          (BLTU AR:$t, AR:$s, bb:$target)>;
-def : Pat<(brcc SETLE, AR:$s, AR:$t, bb:$target),
-          (BGE AR:$t, AR:$s, bb:$target)>;
-def : Pat<(brcc SETULE, AR:$s, AR:$t, bb:$target),
-          (BGEU AR:$t, AR:$s, bb:$target)>;
-
 //===----------------------------------------------------------------------===//
 // Call and jump instructions
 //===----------------------------------------------------------------------===//
@@ -574,3 +565,12 @@ let Defs = [SP], Uses = [SP] in {
                                "#ADJCALLSTACKUP",
                                [(Xtensa_callseq_end timm:$amt1, timm:$amt2)]>;
 }
+
+//===----------------------------------------------------------------------===//
+// Generic select instruction
+//===----------------------------------------------------------------------===//
+let usesCustomInserter = 1 in {
+  def SELECT : Pseudo<(outs AR:$dst), (ins AR:$lhs, AR:$rhs, AR:$t, AR:$f, i32imm:$cond),
+                     "!select $dst, $lhs, $rhs, $t, $f, $cond",
+                     [(set i32:$dst, (Xtensa_select_cc i32:$lhs, i32:$rhs, i32:$t, i32:$f, imm:$cond))]>;
+}

diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td
index 88d3c9dfe7fd8..93cd1c933dbde 100644
--- a/llvm/lib/Target/Xtensa/XtensaOperators.td
+++ b/llvm/lib/Target/Xtensa/XtensaOperators.td
@@ -19,6 +19,11 @@ def SDT_XtensaWrapPtr             : SDTypeProfile<1, 1,
 
 def SDT_XtensaBrJT                : SDTypeProfile<0, 2,
                                                  [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
+
+def SDT_XtensaSelectCC            : SDTypeProfile<1, 5,
+                                                 [SDTCisSameAs<0, 1>,
+                                                  SDTCisSameAs<2, 3>,
+                                                  SDTCisVT<5, i32>]>;
 //===----------------------------------------------------------------------===//
 // Node definitions
 //===----------------------------------------------------------------------===//
@@ -38,3 +43,6 @@ def Xtensa_callseq_end  : SDNode<"ISD::CALLSEQ_END",   SDT_XtensaCallSeqEnd,
                                  SDNPOutGlue]>;
 
 def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>;
+
+def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC,
+                            [SDNPInGlue]>;

diff --git a/llvm/test/CodeGen/Xtensa/brcc.ll b/llvm/test/CodeGen/Xtensa/brcc.ll
index 83f6dfd1aebc3..8bbc39c536c56 100644
--- a/llvm/test/CodeGen/Xtensa/brcc.ll
+++ b/llvm/test/CodeGen/Xtensa/brcc.ll
@@ -1,11 +1,17 @@
-; RUN: llc -march=xtensa < %s | FileCheck %s
+; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
 
-; CHECK-LABEL: brcc1:
-; CHECK: bge   a3, a2, .LBB0_2
-; CHECK: addi  a2, a2, 4
-; CHECK: .LBB0_2:
-define i32 @brcc1(i32 %a, i32 %b) nounwind {
-entry:
+define i32 @brcc_sgt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_sgt:
+; CHECK:         bge a3, a2, .LBB0_2
+; CHECK-NEXT:    j .LBB0_1
+; CHECK-NEXT:  .LBB0_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB0_3
+; CHECK-NEXT:  .LBB0_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB0_3: # %exit
+; CHECK-NEXT:    ret
   %wb = icmp sgt i32 %a, %b
   br i1 %wb, label %t1, label %t2
 t1:
@@ -19,12 +25,17 @@ exit:
   ret i32 %v
 }
 
-; CHECK-LABEL: brcc2
-; CHECK: bgeu  a3, a2, .LBB1_2
-; CHECK: addi  a2, a2, 4
-; CHECK: .LBB1_2:
-define i32 @brcc2(i32 %a, i32 %b) nounwind {
-entry:
+define i32 @brcc_ugt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_ugt:
+; CHECK:         bgeu a3, a2, .LBB1_2
+; CHECK-NEXT:    j .LBB1_1
+; CHECK-NEXT:  .LBB1_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB1_3
+; CHECK-NEXT:  .LBB1_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB1_3: # %exit
+; CHECK-NEXT:    ret
   %wb = icmp ugt i32 %a, %b
   br i1 %wb, label %t1, label %t2
 t1:
@@ -38,12 +49,17 @@ exit:
   ret i32 %v
 }
 
-; CHECK-LABEL: brcc3:
-; CHECK: blt   a3, a2, .LBB2_2
-; CHECK: addi  a2, a2, 4
-; CHECK: .LBB2_2:
-define i32 @brcc3(i32 %a, i32 %b) nounwind {
-entry:
+define i32 @brcc_sle(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_sle:
+; CHECK:         blt a3, a2, .LBB2_2
+; CHECK-NEXT:    j .LBB2_1
+; CHECK-NEXT:  .LBB2_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB2_3
+; CHECK-NEXT:  .LBB2_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB2_3: # %exit
+; CHECK-NEXT:    ret
   %wb = icmp sle i32 %a, %b
   br i1 %wb, label %t1, label %t2
 t1:
@@ -57,12 +73,17 @@ exit:
   ret i32 %v
 }
 
-; CHECK-LABEL: brcc4
-; CHECK: bltu  a3, a2, .LBB3_2
-; CHECK: addi  a2, a2, 4
-; CHECK: .LBB3_2:
-define i32 @brcc4(i32 %a, i32 %b) nounwind {
-entry:
+define i32 @brcc_ule(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_ule:
+; CHECK:         bltu a3, a2, .LBB3_2
+; CHECK-NEXT:    j .LBB3_1
+; CHECK-NEXT:  .LBB3_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB3_3
+; CHECK-NEXT:  .LBB3_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB3_3: # %exit
+; CHECK-NEXT:    ret
   %wb = icmp ule i32 %a, %b
   br i1 %wb, label %t1, label %t2
 t1:
@@ -75,3 +96,147 @@ exit:
   %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
   ret i32 %v
 }
+
+define i32 @brcc_eq(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_eq:
+; CHECK:         bne a2, a3, .LBB4_2
+; CHECK-NEXT:    j .LBB4_1
+; CHECK-NEXT:  .LBB4_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB4_3
+; CHECK-NEXT:  .LBB4_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB4_3: # %exit
+; CHECK-NEXT:    ret
+  %wb = icmp eq i32 %a, %b
+  br i1 %wb, label %t1, label %t2
+t1:
+  %t1v = add i32 %a, 4
+  br label %exit
+t2:
+  %t2v = add i32 %b, 8
+  br label %exit
+exit:
+  %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+  ret i32 %v
+}
+
+define i32 @brcc_ne(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_ne:
+; CHECK:         beq a2, a3, .LBB5_2
+; CHECK-NEXT:    j .LBB5_1
+; CHECK-NEXT:  .LBB5_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB5_3
+; CHECK-NEXT:  .LBB5_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB5_3: # %exit
+; CHECK-NEXT:    ret
+  %wb = icmp ne i32 %a, %b
+  br i1 %wb, label %t1, label %t2
+t1:
+  %t1v = add i32 %a, 4
+  br label %exit
+t2:
+  %t2v = add i32 %b, 8
+  br label %exit
+exit:
+  %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+  ret i32 %v
+}
+
+define i32 @brcc_ge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_ge:
+; CHECK:         blt a2, a3, .LBB6_2
+; CHECK-NEXT:    j .LBB6_1
+; CHECK-NEXT:  .LBB6_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB6_3
+; CHECK-NEXT:  .LBB6_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB6_3: # %exit
+; CHECK-NEXT:    ret
+  %wb = icmp sge i32 %a, %b
+  br i1 %wb, label %t1, label %t2
+t1:
+  %t1v = add i32 %a, 4
+  br label %exit
+t2:
+  %t2v = add i32 %b, 8
+  br label %exit
+exit:
+  %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+  ret i32 %v
+}
+
+define i32 @brcc_lt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_lt:
+; CHECK:         bge a2, a3, .LBB7_2
+; CHECK-NEXT:    j .LBB7_1
+; CHECK-NEXT:  .LBB7_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB7_3
+; CHECK-NEXT:  .LBB7_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB7_3: # %exit
+; CHECK-NEXT:    ret
+  %wb = icmp slt i32 %a, %b
+  br i1 %wb, label %t1, label %t2
+t1:
+  %t1v = add i32 %a, 4
+  br label %exit
+t2:
+  %t2v = add i32 %b, 8
+  br label %exit
+exit:
+  %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+  ret i32 %v
+}
+
+define i32 @brcc_uge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_uge:
+; CHECK:         bltu a2, a3, .LBB8_2
+; CHECK-NEXT:    j .LBB8_1
+; CHECK-NEXT:  .LBB8_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB8_3
+; CHECK-NEXT:  .LBB8_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB8_3: # %exit
+; CHECK-NEXT:    ret
+  %wb = icmp uge i32 %a, %b
+  br i1 %wb, label %t1, label %t2
+t1:
+  %t1v = add i32 %a, 4
+  br label %exit
+t2:
+  %t2v = add i32 %b, 8
+  br label %exit
+exit:
+  %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+  ret i32 %v
+}
+
+define i32 @brcc_ult(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: brcc_ult:
+; CHECK:         bgeu a2, a3, .LBB9_2
+; CHECK-NEXT:    j .LBB9_1
+; CHECK-NEXT:  .LBB9_1: # %t1
+; CHECK-NEXT:    addi a2, a2, 4
+; CHECK-NEXT:    j .LBB9_3
+; CHECK-NEXT:  .LBB9_2: # %t2
+; CHECK-NEXT:    addi a2, a3, 8
+; CHECK-NEXT:  .LBB9_3: # %exit
+; CHECK-NEXT:    ret
+  %wb = icmp ult i32 %a, %b
+  br i1 %wb, label %t1, label %t2
+t1:
+  %t1v = add i32 %a, 4
+  br label %exit
+t2:
+  %t2v = add i32 %b, 8
+  br label %exit
+exit:
+  %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+  ret i32 %v
+}

diff --git a/llvm/test/CodeGen/Xtensa/select-cc.ll b/llvm/test/CodeGen/Xtensa/select-cc.ll
new file mode 100644
index 0000000000000..812e6a5b852ea
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/select-cc.ll
@@ -0,0 +1,510 @@
+; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+define i32 @f_eq(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_eq:
+; CHECK:         l32i a8, a3, 0
+; CHECK-NEXT:    beq a2, a8, .LBB0_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp eq i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_ne(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ne:
+; CHECK:         l32i a8, a3, 0
+; CHECK-NEXT:    bne a2, a8, .LBB1_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB1_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp ne i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_ugt(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ugt:
+; CHECK:         or a8, a2, a2
+; CHECK-NEXT:    l32i a2, a3, 0
+; CHECK-NEXT:    bgeu a2, a8, .LBB2_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB2_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp ugt i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_uge(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_uge:
+; CHECK:         l32i a8, a3, 0
+; CHECK-NEXT:    bgeu a2, a8, .LBB3_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB3_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp uge i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_ult(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ult:
+; CHECK:         l32i a8, a3, 0
+; CHECK-NEXT:    bltu a2, a8, .LBB4_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB4_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp ult i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_ule(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ule:
+; CHECK:         or a8, a2, a2
+; CHECK-NEXT:    l32i a2, a3, 0
+; CHECK-NEXT:    bltu a2, a8, .LBB5_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB5_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp ule i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_sgt(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sgt:
+; CHECK:         or a8, a2, a2
+; CHECK-NEXT:    l32i a2, a3, 0
+; CHECK-NEXT:    bge a2, a8, .LBB6_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB6_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp sgt i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_sge(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sge:
+; CHECK:         l32i a8, a3, 0
+; CHECK-NEXT:    bge a2, a8, .LBB7_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB7_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp sge i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_slt(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_slt:
+; CHECK:         l32i a8, a3, 0
+; CHECK-NEXT:    blt a2, a8, .LBB8_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB8_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp slt i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_sle(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sle:
+; CHECK:         or a8, a2, a2
+; CHECK-NEXT:    l32i a2, a3, 0
+; CHECK-NEXT:    blt a2, a8, .LBB9_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB9_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp sle i32 %a, %val1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_slt_imm(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_slt_imm:
+; CHECK:         movi a8, 1
+; CHECK-NEXT:    blt a2, a8, .LBB10_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a2, a3, 0
+; CHECK-NEXT:  .LBB10_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp slt i32 %a, 1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_sgt_imm(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sgt_imm:
+; CHECK:         or a8, a2, a2
+; CHECK-NEXT:    l32i a2, a3, 0
+; CHECK-NEXT:    movi a9, -1
+; CHECK-NEXT:    bge a9, a8, .LBB11_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a8, a8
+; CHECK-NEXT:  .LBB11_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp sgt i32 %a, -1
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+define i32 @f_ult_imm(i32 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ult_imm:
+; CHECK:         movi a8, 1024
+; CHECK-NEXT:    bltu a2, a8, .LBB12_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a2, a3, 0
+; CHECK-NEXT:  .LBB12_2:
+; CHECK-NEXT:    ret
+  %val1 = load i32, ptr %b
+  %tst1 = icmp ult i32 %a, 1024
+  %val2 = select i1 %tst1, i32 %a, i32 %val1
+  ret i32 %val2
+}
+
+; Tests for i64 operands
+
+define i64 @f_eq_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_eq_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    xor a9, a3, a8
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    xor a10, a2, a11
+; CHECK-NEXT:    or a9, a10, a9
+; CHECK-NEXT:    movi a10, 0
+; CHECK-NEXT:    beq a9, a10, .LBB13_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB13_2:
+; CHECK-NEXT:    beq a9, a10, .LBB13_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB13_4:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp eq i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_ne_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ne_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    xor a9, a3, a8
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    xor a10, a2, a11
+; CHECK-NEXT:    or a9, a10, a9
+; CHECK-NEXT:    movi a10, 0
+; CHECK-NEXT:    bne a9, a10, .LBB14_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB14_2:
+; CHECK-NEXT:    bne a9, a10, .LBB14_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB14_4:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp ne i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_ugt_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ugt_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    bltu a8, a3, .LBB15_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB15_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bltu a11, a2, .LBB15_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB15_4:
+; CHECK-NEXT:    beq a3, a8, .LBB15_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB15_6:
+; CHECK-NEXT:    bne a10, a9, .LBB15_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB15_8:
+; CHECK-NEXT:    bne a10, a9, .LBB15_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB15_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp ugt i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_uge_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_uge_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    bgeu a3, a8, .LBB16_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB16_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bgeu a2, a11, .LBB16_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB16_4:
+; CHECK-NEXT:    beq a3, a8, .LBB16_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB16_6:
+; CHECK-NEXT:    bne a10, a9, .LBB16_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB16_8:
+; CHECK-NEXT:    bne a10, a9, .LBB16_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB16_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp uge i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_ult_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ult_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    bltu a3, a8, .LBB17_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB17_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bltu a2, a11, .LBB17_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB17_4:
+; CHECK-NEXT:    beq a3, a8, .LBB17_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB17_6:
+; CHECK-NEXT:    bne a10, a9, .LBB17_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB17_8:
+; CHECK-NEXT:    bne a10, a9, .LBB17_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB17_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp ult i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_ule_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ule_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    bgeu a8, a3, .LBB18_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB18_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bgeu a11, a2, .LBB18_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB18_4:
+; CHECK-NEXT:    beq a3, a8, .LBB18_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB18_6:
+; CHECK-NEXT:    bne a10, a9, .LBB18_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB18_8:
+; CHECK-NEXT:    bne a10, a9, .LBB18_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB18_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp ule i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_sgt_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sgt_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    blt a8, a3, .LBB19_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB19_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bltu a11, a2, .LBB19_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB19_4:
+; CHECK-NEXT:    beq a3, a8, .LBB19_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB19_6:
+; CHECK-NEXT:    bne a10, a9, .LBB19_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB19_8:
+; CHECK-NEXT:    bne a10, a9, .LBB19_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB19_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp sgt i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_sge_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sge_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    bge a3, a8, .LBB20_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB20_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bgeu a2, a11, .LBB20_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB20_4:
+; CHECK-NEXT:    beq a3, a8, .LBB20_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB20_6:
+; CHECK-NEXT:    bne a10, a9, .LBB20_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB20_8:
+; CHECK-NEXT:    bne a10, a9, .LBB20_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB20_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp sge i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_slt_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_slt_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    blt a3, a8, .LBB21_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB21_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bltu a2, a11, .LBB21_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB21_4:
+; CHECK-NEXT:    beq a3, a8, .LBB21_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB21_6:
+; CHECK-NEXT:    bne a10, a9, .LBB21_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB21_8:
+; CHECK-NEXT:    bne a10, a9, .LBB21_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB21_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp slt i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}
+
+define i64 @f_sle_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sle_i64:
+; CHECK:         l32i a8, a4, 4
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    or a7, a10, a10
+; CHECK-NEXT:    bge a8, a3, .LBB22_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    or a7, a9, a9
+; CHECK-NEXT:  .LBB22_2:
+; CHECK-NEXT:    l32i a11, a4, 0
+; CHECK-NEXT:    bgeu a11, a2, .LBB22_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    or a10, a9, a9
+; CHECK-NEXT:  .LBB22_4:
+; CHECK-NEXT:    beq a3, a8, .LBB22_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    or a10, a7, a7
+; CHECK-NEXT:  .LBB22_6:
+; CHECK-NEXT:    bne a10, a9, .LBB22_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    or a2, a11, a11
+; CHECK-NEXT:  .LBB22_8:
+; CHECK-NEXT:    bne a10, a9, .LBB22_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    or a3, a8, a8
+; CHECK-NEXT:  .LBB22_10:
+; CHECK-NEXT:    ret
+  %val1 = load i64, ptr %b
+  %tst1 = icmp sle i64 %a, %val1
+  %val2 = select i1 %tst1, i64 %a, i64 %val1
+  ret i64 %val2
+}

diff --git a/llvm/test/CodeGen/Xtensa/setcc.ll b/llvm/test/CodeGen/Xtensa/setcc.ll
new file mode 100644
index 0000000000000..05eb80e041fbe
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/setcc.ll
@@ -0,0 +1,704 @@
+; RUN: llc < %s -mtriple=xtensa -O0 | FileCheck %s
+
+define i32 @f_eq(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_eq:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a2, a3, .LBB0_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp eq i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_slt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_slt:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    blt a2, a3, .LBB1_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB1_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp slt i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_sle(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_sle:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bge a3, a2, .LBB2_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB2_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp sle i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_sgt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_sgt:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    blt a3, a2, .LBB3_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB3_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp sgt i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_sge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_sge:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bge a2, a3, .LBB4_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB4_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp sge i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_ne(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ne:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bne a2, a3, .LBB5_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB5_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ne i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_ult(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ult:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a2, a3, .LBB6_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB6_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ult i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_ule(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ule:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a3, a2, .LBB7_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB7_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ule i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_ugt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ugt:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a3, a2, .LBB8_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB8_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ugt i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+define i32 @f_uge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_uge:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a2, a3, .LBB9_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB9_2:
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp uge i32 %a, %b
+  %res = zext i1 %cond to i32
+  ret i32 %res
+}
+
+
+; Tests for i64 operands
+
+define i64 @f_eq_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_eq_i64:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a4
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    # kill: def $a8 killed $a2
+; CHECK-NEXT:    xor a9, a3, a5
+; CHECK-NEXT:    xor a8, a2, a4
+; CHECK-NEXT:    or a8, a8, a9
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    s32i a9, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB10_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB10_2:
+; CHECK-NEXT:    l32i a3, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp eq i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_slt_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_slt_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    blt a3, a5, .LBB11_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB11_2:
+; CHECK-NEXT:    l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a8, a9, .LBB11_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB11_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB11_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB11_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp slt i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_sle_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_sle_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    bge a5, a3, .LBB12_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB12_2:
+; CHECK-NEXT:    l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a8, a9, .LBB12_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB12_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB12_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB12_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp sle i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_sgt_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_sgt_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    blt a5, a3, .LBB13_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB13_2:
+; CHECK-NEXT:    l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a8, a9, .LBB13_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB13_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB13_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB13_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp sgt i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_sge_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_sge_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    bge a3, a5, .LBB14_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB14_2:
+; CHECK-NEXT:    l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a8, a9, .LBB14_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB14_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB14_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB14_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp sge i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_ne_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ne_i64:
+; CHECK:         addi a8, a1, -16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a4
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    # kill: def $a8 killed $a2
+; CHECK-NEXT:    xor a9, a3, a5
+; CHECK-NEXT:    xor a8, a2, a4
+; CHECK-NEXT:    or a8, a8, a9
+; CHECK-NEXT:    movi a10, 1
+; CHECK-NEXT:    movi a9, 0
+; CHECK-NEXT:    s32i a9, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    bne a8, a9, .LBB15_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB15_2:
+; CHECK-NEXT:    l32i a3, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 16
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ne i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_ult_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ult_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a3, a5, .LBB16_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB16_2:
+; CHECK-NEXT:    l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a8, a9, .LBB16_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB16_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB16_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB16_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ult i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_ule_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ule_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a5, a3, .LBB17_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB17_2:
+; CHECK-NEXT:    l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a8, a9, .LBB17_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB17_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB17_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB17_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ule i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_ugt_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ugt_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a5, a3, .LBB18_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB18_2:
+; CHECK-NEXT:    l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bltu a8, a9, .LBB18_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB18_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB18_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB18_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp ugt i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}
+
+define i64 @f_uge_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_uge_i64:
+; CHECK:         addi a8, a1, -48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT:    # kill: def $a8 killed $a5
+; CHECK-NEXT:    # kill: def $a8 killed $a3
+; CHECK-NEXT:    movi a8, 0
+; CHECK-NEXT:    s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT:    movi a8, 1
+; CHECK-NEXT:    s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a3, a5, .LBB19_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB19_2:
+; CHECK-NEXT:    l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT:    s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:    bgeu a8, a9, .LBB19_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB19_4:
+; CHECK-NEXT:    l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:    beq a8, a9, .LBB19_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT:    s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT:  .LBB19_6:
+; CHECK-NEXT:    l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT:    l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT:    addi a8, a1, 48
+; CHECK-NEXT:    or a1, a8, a8
+; CHECK-NEXT:    ret
+
+  %cond = icmp uge i64 %a, %b
+  %res = zext i1 %cond to i64
+  ret i64 %res
+}


        

